Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Jan 2012 05:46:36 +0000 (21:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Jan 2012 05:46:36 +0000 (21:46 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  autofs4: deal with autofs4_write/autofs4_write races
  autofs4: catatonic_mode vs. notify_daemon race
  autofs4: autofs4_wait() vs. autofs4_catatonic_mode() race
  hfsplus: creation of hidden dir on mount can fail
  block_dev: Suppress bdev_cache_init() kmemleak warning
  fix shrink_dcache_parent() livelock
  coda: switch coda_cnode_make() to sane API as well, clean coda_lookup()
  coda: deal correctly with allocation failure from coda_cnode_makectl()
  securityfs: fix object creation races

1419 files changed:
Documentation/ABI/stable/sysfs-bus-xen-backend [new file with mode: 0644]
Documentation/ABI/stable/sysfs-devices-system-xen_memory [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-pci
Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-platform-docg3 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-hid-multitouch [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-hid-roccat-isku [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-hid-wiimote
Documentation/ABI/testing/sysfs-driver-wacom
Documentation/devicetree/bindings/c6x/clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/dscr.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/emifa.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/interrupt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/soc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/timer64.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/tegra-kbc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/gpio-control-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/fixed-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/twl-rtc.txt [new file with mode: 0644]
Documentation/feature-removal-schedule.txt
Documentation/filesystems/ext4.txt
Documentation/filesystems/proc.txt
Documentation/input/alps.txt [new file with mode: 0644]
Documentation/input/gpio-tilt.txt [new file with mode: 0644]
Documentation/input/sentelic.txt
Documentation/kernel-parameters.txt
Documentation/pinctrl.txt
Documentation/power/regulator/regulator.txt
Documentation/trace/events-kmem.txt
Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
Documentation/trace/tracepoint-analysis.txt
Documentation/virtual/kvm/api.txt
Documentation/watchdog/00-INDEX
Documentation/watchdog/convert_drivers_to_kernel_api.txt
Documentation/watchdog/watchdog-kernel-api.txt
MAINTAINERS
arch/alpha/Kconfig
arch/alpha/kernel/pci-noop.c
arch/alpha/kernel/pci.c
arch/arm/Kconfig
arch/arm/include/asm/io.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-tegra/include/mach/kbc.h
arch/arm/mach-u300/Kconfig
arch/arm/mach-u300/core.c
arch/arm/mach-u300/include/mach/gpio-u300.h
arch/arm/mach-u300/include/mach/irqs.h
arch/arm/mach-u300/mmc.c
arch/arm/mach-u300/u300-gpio.h [new file with mode: 0644]
arch/arm/mm/iomap.c
arch/arm/plat-omap/include/plat/iommu.h
arch/arm/plat-omap/include/plat/iovmm.h
arch/arm/plat-samsung/include/plat/keypad.h
arch/blackfin/configs/BF518F-EZBRD_defconfig
arch/blackfin/configs/BF526-EZBRD_defconfig
arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
arch/blackfin/configs/BF527-EZKIT-V2_defconfig
arch/blackfin/configs/BF527-EZKIT_defconfig
arch/blackfin/configs/BF533-EZKIT_defconfig
arch/blackfin/configs/BF533-STAMP_defconfig
arch/blackfin/configs/BF537-STAMP_defconfig
arch/blackfin/configs/BF538-EZKIT_defconfig
arch/blackfin/configs/BF548-EZKIT_defconfig
arch/blackfin/configs/BF561-ACVILON_defconfig
arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
arch/blackfin/configs/BF561-EZKIT_defconfig
arch/blackfin/configs/BlackStamp_defconfig
arch/blackfin/configs/CM-BF527_defconfig
arch/blackfin/configs/CM-BF533_defconfig
arch/blackfin/configs/CM-BF537E_defconfig
arch/blackfin/configs/CM-BF537U_defconfig
arch/blackfin/configs/CM-BF548_defconfig
arch/blackfin/configs/CM-BF561_defconfig
arch/blackfin/configs/DNP5370_defconfig
arch/blackfin/configs/H8606_defconfig
arch/blackfin/configs/IP0X_defconfig
arch/blackfin/configs/PNAV-10_defconfig
arch/blackfin/configs/SRV1_defconfig
arch/blackfin/configs/TCM-BF518_defconfig
arch/blackfin/configs/TCM-BF537_defconfig
arch/blackfin/include/asm/bfin_serial.h
arch/blackfin/include/asm/cpu.h
arch/blackfin/include/asm/smp.h
arch/blackfin/kernel/setup.c
arch/blackfin/kernel/time-ts.c
arch/blackfin/mach-bf518/boards/ezbrd.c
arch/blackfin/mach-bf518/boards/tcm-bf518.c
arch/blackfin/mach-bf527/boards/ad7160eval.c
arch/blackfin/mach-bf527/boards/cm_bf527.c
arch/blackfin/mach-bf527/boards/ezbrd.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf527/boards/tll6527m.c
arch/blackfin/mach-bf533/boards/H8606.c
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf533/boards/cm_bf533.c
arch/blackfin/mach-bf533/boards/ezkit.c
arch/blackfin/mach-bf533/boards/ip0x.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/dnp5370.c
arch/blackfin/mach-bf537/boards/minotaur.c
arch/blackfin/mach-bf537/boards/pnav10.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf538/boards/ezkit.c
arch/blackfin/mach-bf548/boards/cm_bf548.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf561/include/mach/pll.h
arch/blackfin/mach-bf561/smp.c
arch/blackfin/mach-common/smp.c
arch/c6x/Kconfig [new file with mode: 0644]
arch/c6x/Makefile [new file with mode: 0644]
arch/c6x/boot/Makefile [new file with mode: 0644]
arch/c6x/boot/dts/dsk6455.dts [new file with mode: 0644]
arch/c6x/boot/dts/evmc6457.dts [new file with mode: 0644]
arch/c6x/boot/dts/evmc6472.dts [new file with mode: 0644]
arch/c6x/boot/dts/evmc6474.dts [new file with mode: 0644]
arch/c6x/boot/dts/tms320c6455.dtsi [new file with mode: 0644]
arch/c6x/boot/dts/tms320c6457.dtsi [new file with mode: 0644]
arch/c6x/boot/dts/tms320c6472.dtsi [new file with mode: 0644]
arch/c6x/boot/dts/tms320c6474.dtsi [new file with mode: 0644]
arch/c6x/boot/linked_dtb.S [new file with mode: 0644]
arch/c6x/configs/dsk6455_defconfig [new file with mode: 0644]
arch/c6x/configs/evmc6457_defconfig [new file with mode: 0644]
arch/c6x/configs/evmc6472_defconfig [new file with mode: 0644]
arch/c6x/configs/evmc6474_defconfig [new file with mode: 0644]
arch/c6x/include/asm/Kbuild [new file with mode: 0644]
arch/c6x/include/asm/asm-offsets.h [new file with mode: 0644]
arch/c6x/include/asm/bitops.h [new file with mode: 0644]
arch/c6x/include/asm/byteorder.h [new file with mode: 0644]
arch/c6x/include/asm/cache.h [new file with mode: 0644]
arch/c6x/include/asm/cacheflush.h [new file with mode: 0644]
arch/c6x/include/asm/checksum.h [new file with mode: 0644]
arch/c6x/include/asm/clkdev.h [new file with mode: 0644]
arch/c6x/include/asm/clock.h [new file with mode: 0644]
arch/c6x/include/asm/delay.h [new file with mode: 0644]
arch/c6x/include/asm/dma-mapping.h [new file with mode: 0644]
arch/c6x/include/asm/dscr.h [new file with mode: 0644]
arch/c6x/include/asm/elf.h [new file with mode: 0644]
arch/c6x/include/asm/ftrace.h [new file with mode: 0644]
arch/c6x/include/asm/hardirq.h [new file with mode: 0644]
arch/c6x/include/asm/irq.h [new file with mode: 0644]
arch/c6x/include/asm/irqflags.h [new file with mode: 0644]
arch/c6x/include/asm/linkage.h [new file with mode: 0644]
arch/c6x/include/asm/megamod-pic.h [new file with mode: 0644]
arch/c6x/include/asm/mmu.h [new file with mode: 0644]
arch/c6x/include/asm/module.h [new file with mode: 0644]
arch/c6x/include/asm/mutex.h [new file with mode: 0644]
arch/c6x/include/asm/page.h [new file with mode: 0644]
arch/c6x/include/asm/pgtable.h [new file with mode: 0644]
arch/c6x/include/asm/processor.h [new file with mode: 0644]
arch/c6x/include/asm/procinfo.h [new file with mode: 0644]
arch/c6x/include/asm/prom.h [new file with mode: 0644]
arch/c6x/include/asm/ptrace.h [new file with mode: 0644]
arch/c6x/include/asm/sections.h [new file with mode: 0644]
arch/c6x/include/asm/setup.h [new file with mode: 0644]
arch/c6x/include/asm/sigcontext.h [new file with mode: 0644]
arch/c6x/include/asm/signal.h [new file with mode: 0644]
arch/c6x/include/asm/soc.h [new file with mode: 0644]
arch/c6x/include/asm/string.h [new file with mode: 0644]
arch/c6x/include/asm/swab.h [new file with mode: 0644]
arch/c6x/include/asm/syscall.h [new file with mode: 0644]
arch/c6x/include/asm/syscalls.h [new file with mode: 0644]
arch/c6x/include/asm/system.h [new file with mode: 0644]
arch/c6x/include/asm/thread_info.h [new file with mode: 0644]
arch/c6x/include/asm/timer64.h [new file with mode: 0644]
arch/c6x/include/asm/timex.h [new file with mode: 0644]
arch/c6x/include/asm/tlb.h [new file with mode: 0644]
arch/c6x/include/asm/traps.h [new file with mode: 0644]
arch/c6x/include/asm/uaccess.h [new file with mode: 0644]
arch/c6x/include/asm/unaligned.h [new file with mode: 0644]
arch/c6x/include/asm/unistd.h [new file with mode: 0644]
arch/c6x/kernel/Makefile [new file with mode: 0644]
arch/c6x/kernel/asm-offsets.c [new file with mode: 0644]
arch/c6x/kernel/c6x_ksyms.c [new file with mode: 0644]
arch/c6x/kernel/devicetree.c [new file with mode: 0644]
arch/c6x/kernel/dma.c [new file with mode: 0644]
arch/c6x/kernel/entry.S [new file with mode: 0644]
arch/c6x/kernel/head.S [new file with mode: 0644]
arch/c6x/kernel/irq.c [new file with mode: 0644]
arch/c6x/kernel/module.c [new file with mode: 0644]
arch/c6x/kernel/process.c [new file with mode: 0644]
arch/c6x/kernel/ptrace.c [new file with mode: 0644]
arch/c6x/kernel/setup.c [new file with mode: 0644]
arch/c6x/kernel/signal.c [new file with mode: 0644]
arch/c6x/kernel/soc.c [new file with mode: 0644]
arch/c6x/kernel/switch_to.S [new file with mode: 0644]
arch/c6x/kernel/sys_c6x.c [new file with mode: 0644]
arch/c6x/kernel/time.c [new file with mode: 0644]
arch/c6x/kernel/traps.c [new file with mode: 0644]
arch/c6x/kernel/vectors.S [new file with mode: 0644]
arch/c6x/kernel/vmlinux.lds.S [new file with mode: 0644]
arch/c6x/lib/Makefile [new file with mode: 0644]
arch/c6x/lib/checksum.c [new file with mode: 0644]
arch/c6x/lib/csum_64plus.S [new file with mode: 0644]
arch/c6x/lib/divi.S [new file with mode: 0644]
arch/c6x/lib/divremi.S [new file with mode: 0644]
arch/c6x/lib/divremu.S [new file with mode: 0644]
arch/c6x/lib/divu.S [new file with mode: 0644]
arch/c6x/lib/llshl.S [new file with mode: 0644]
arch/c6x/lib/llshr.S [new file with mode: 0644]
arch/c6x/lib/llshru.S [new file with mode: 0644]
arch/c6x/lib/memcpy_64plus.S [new file with mode: 0644]
arch/c6x/lib/mpyll.S [new file with mode: 0644]
arch/c6x/lib/negll.S [new file with mode: 0644]
arch/c6x/lib/pop_rts.S [new file with mode: 0644]
arch/c6x/lib/push_rts.S [new file with mode: 0644]
arch/c6x/lib/remi.S [new file with mode: 0644]
arch/c6x/lib/remu.S [new file with mode: 0644]
arch/c6x/lib/strasgi.S [new file with mode: 0644]
arch/c6x/lib/strasgi_64plus.S [new file with mode: 0644]
arch/c6x/mm/Makefile [new file with mode: 0644]
arch/c6x/mm/dma-coherent.c [new file with mode: 0644]
arch/c6x/mm/init.c [new file with mode: 0644]
arch/c6x/platforms/Kconfig [new file with mode: 0644]
arch/c6x/platforms/Makefile [new file with mode: 0644]
arch/c6x/platforms/cache.c [new file with mode: 0644]
arch/c6x/platforms/dscr.c [new file with mode: 0644]
arch/c6x/platforms/emif.c [new file with mode: 0644]
arch/c6x/platforms/megamod-pic.c [new file with mode: 0644]
arch/c6x/platforms/platform.c [new file with mode: 0644]
arch/c6x/platforms/pll.c [new file with mode: 0644]
arch/c6x/platforms/plldata.c [new file with mode: 0644]
arch/c6x/platforms/timer64.c [new file with mode: 0644]
arch/cris/Kconfig
arch/cris/arch-v32/drivers/axisflashmap.c
arch/frv/Kconfig
arch/frv/include/asm/io.h
arch/frv/mb93090-mb00/Makefile
arch/frv/mb93090-mb00/pci-iomap.c [deleted file]
arch/hexagon/Kconfig
arch/ia64/Kconfig
arch/ia64/include/asm/iommu.h
arch/ia64/include/asm/xen/interface.h
arch/ia64/kernel/pci-dma.c
arch/ia64/kvm/kvm-ia64.c
arch/m68k/Kconfig
arch/microblaze/Kconfig
arch/microblaze/include/asm/irq.h
arch/microblaze/include/asm/page.h
arch/microblaze/include/asm/setup.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/kernel/early_printk.c
arch/microblaze/kernel/entry.S
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/irq.c
arch/microblaze/kernel/module.c
arch/microblaze/kernel/setup.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/microblaze/lib/Makefile
arch/microblaze/lib/cmpdi2.c [new file with mode: 0644]
arch/microblaze/pci/iomap.c
arch/microblaze/pci/pci-common.c
arch/mips/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
arch/mips/lib/iomap-pci.c
arch/mn10300/Kconfig
arch/mn10300/include/asm/io.h
arch/mn10300/unit-asb2305/Makefile
arch/mn10300/unit-asb2305/pci-iomap.c [deleted file]
arch/openrisc/Kconfig
arch/parisc/Kconfig
arch/parisc/lib/iomap.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/kvm.h
arch/powerpc/kernel/iomap.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/platforms/Kconfig
arch/score/Kconfig
arch/sh/Kconfig
arch/sh/drivers/pci/pci.c
arch/sparc/Kconfig
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/signal.h
arch/sparc/lib/iomap.c
arch/tile/Kconfig
arch/tile/include/asm/io.h
arch/tile/include/asm/pci.h
arch/tile/kernel/pci.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/io.h
arch/unicore32/kernel/puv3-nb0916.c
arch/unicore32/kernel/setup.c
arch/unicore32/kernel/signal.c
arch/unicore32/kernel/time.c
arch/x86/Kconfig
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/iommu.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/signal.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c [new file with mode: 0644]
arch/x86/kvm/cpuid.h [new file with mode: 0644]
arch/x86/kvm/emulate.c
arch/x86/kvm/i8254.c
arch/x86/kvm/i8259.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu_audit.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/pmu.c [new file with mode: 0644]
arch/x86/kvm/svm.c
arch/x86/kvm/timer.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/xen/Kconfig
arch/x86/xen/grant-table.c
arch/x86/xen/mmu.c
drivers/acpi/pci_root.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci_platform.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/libata-transport.c
drivers/ata/libata.h
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_at91.c
drivers/ata/pata_bf54x.c
drivers/ata/pata_cs5536.c
drivers/ata/pata_imx.c
drivers/ata/pata_ixp4xx_cf.c
drivers/ata/pata_mpc52xx.c
drivers/ata/pata_of_platform.c
drivers/ata/pata_palmld.c
drivers/ata/pata_platform.c
drivers/ata/pata_pxa.c
drivers/ata/pata_rb532_cf.c
drivers/ata/sata_dwc_460ex.c
drivers/ata/sata_fsl.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/block/xsysace.c
drivers/char/agp/generic.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-u300.c [deleted file]
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_modes.h
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_sman.c [deleted file]
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_ddc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmiphy.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_mixer.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_mixer.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-mixer.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-vp.h [new file with mode: 0644]
drivers/gpu/drm/gma500/Kconfig [new file with mode: 0644]
drivers/gpu/drm/gma500/Makefile [new file with mode: 0644]
drivers/gpu/drm/gma500/accel_2d.c [new file with mode: 0644]
drivers/gpu/drm/gma500/backlight.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.h [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_crt.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_display.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/framebuffer.c [new file with mode: 0644]
drivers/gpu/drm/gma500/framebuffer.h [new file with mode: 0644]
drivers/gpu/drm/gma500/gem.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gem_glue.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gem_glue.h [new file with mode: 0644]
drivers/gpu/drm/gma500/gtt.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gtt.h [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_bios.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_bios.h [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_gmbus.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_i2c.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_opregion.c [new file with mode: 0644]
drivers/gpu/drm/gma500/mid_bios.c [new file with mode: 0644]
drivers/gpu/drm/gma500/mid_bios.h [new file with mode: 0644]
drivers/gpu/drm/gma500/mmu.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail.h [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_crtc.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/power.c [new file with mode: 0644]
drivers/gpu/drm/gma500/power.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_drv.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_drv.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_display.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_display.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_drv.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_modes.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_reg.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_sdvo.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_irq.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_irq.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_lid.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_reg.h [new file with mode: 0644]
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c [new file with mode: 0644]
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_crtc.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fb.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gpio.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_gpio.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_hwsq.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_i2c.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_mxm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_perf.c
drivers/gpu/drm/nouveau/nouveau_pm.c
drivers/gpu/drm/nouveau/nouveau_pm.h
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nouveau_temp.c
drivers/gpu/drm/nouveau/nouveau_vm.c
drivers/gpu/drm/nouveau/nouveau_vm.h
drivers/gpu/drm/nouveau/nouveau_volt.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv04_dac.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv04_display.c
drivers/gpu/drm/nouveau/nv04_pm.c
drivers/gpu/drm/nouveau/nv04_timer.c
drivers/gpu/drm/nouveau/nv10_gpio.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv40_pm.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_dac.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_display.h
drivers/gpu/drm/nouveau/nv50_evo.c
drivers/gpu/drm/nouveau/nv50_fifo.c
drivers/gpu/drm/nouveau/nv50_gpio.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/nouveau/nv50_sor.c
drivers/gpu/drm/nouveau/nv50_vm.c
drivers/gpu/drm/nouveau/nv84_bsp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv84_vp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv98_crypt.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv98_ppp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nva3_copy.fuc
drivers/gpu/drm/nouveau/nva3_copy.fuc.h
drivers/gpu/drm/nouveau/nva3_pm.c
drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvc0_graph.fuc
drivers/gpu/drm/nouveau/nvc0_graph.h
drivers/gpu/drm/nouveau/nvc0_grctx.c
drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
drivers/gpu/drm/nouveau/nvc0_grhub.fuc
drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
drivers/gpu/drm/nouveau/nvc0_pm.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_semaphore.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sis/sis_drv.h
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_map.c
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-emsff.c
drivers/hid/hid-hyperv.c [new file with mode: 0644]
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-picolcd.c
drivers/hid/hid-quanta.c [deleted file]
drivers/hid/hid-roccat-common.c
drivers/hid/hid-roccat-isku.c [new file with mode: 0644]
drivers/hid/hid-roccat-isku.h [new file with mode: 0644]
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-wacom.c
drivers/hid/hid-wiimote-core.c [new file with mode: 0644]
drivers/hid/hid-wiimote-debug.c [new file with mode: 0644]
drivers/hid/hid-wiimote-ext.c [new file with mode: 0644]
drivers/hid/hid-wiimote.c [deleted file]
drivers/hid/hid-wiimote.h [new file with mode: 0644]
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/usbhid.h
drivers/hid/usbhid/usbkbd.c
drivers/i2c/busses/i2c-puv3.c
drivers/input/evdev.c
drivers/input/input-polldev.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/adp5520-keys.c
drivers/input/keyboard/amikbd.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/bf54x-keys.c
drivers/input/keyboard/davinci_keyscan.c
drivers/input/keyboard/ep93xx_keypad.c
drivers/input/keyboard/gpio_keys_polled.c
drivers/input/keyboard/imx_keypad.c
drivers/input/keyboard/jornada680_kbd.c
drivers/input/keyboard/jornada720_kbd.c
drivers/input/keyboard/lm8323.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/keyboard/nomadik-ske-keypad.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/keyboard/opencores-kbd.c
drivers/input/keyboard/pmic8xxx-keypad.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/keyboard/pxa930_rotary.c
drivers/input/keyboard/samsung-keypad.c
drivers/input/keyboard/sh_keysc.c
drivers/input/keyboard/spear-keyboard.c
drivers/input/keyboard/stmpe-keypad.c
drivers/input/keyboard/tc3589x-keypad.c
drivers/input/keyboard/tca8418_keypad.c [new file with mode: 0644]
drivers/input/keyboard/tegra-kbc.c
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/keyboard/w90p910_keypad.c
drivers/input/misc/88pm860x_onkey.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/ab8500-ponkey.c
drivers/input/misc/adxl34x-spi.c
drivers/input/misc/adxl34x.c
drivers/input/misc/ati_remote2.c
drivers/input/misc/bfin_rotary.c
drivers/input/misc/cobalt_btns.c
drivers/input/misc/dm355evm_keys.c
drivers/input/misc/gp2ap002a00f.c [new file with mode: 0644]
drivers/input/misc/gpio_tilt_polled.c [new file with mode: 0644]
drivers/input/misc/ixp4xx-beeper.c
drivers/input/misc/max8925_onkey.c
drivers/input/misc/mc13783-pwrbutton.c
drivers/input/misc/mpu3050.c
drivers/input/misc/pcap_keys.c
drivers/input/misc/pcf50633-input.c
drivers/input/misc/pcspkr.c
drivers/input/misc/pm8xxx-vibrator.c
drivers/input/misc/pmic8xxx-pwrkey.c
drivers/input/misc/pwm-beeper.c
drivers/input/misc/rb532_button.c
drivers/input/misc/rotary_encoder.c
drivers/input/misc/sgi_btns.c
drivers/input/misc/twl4030-pwrbutton.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/misc/wm831x-on.c
drivers/input/misc/xen-kbdfront.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/amimouse.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/input/mouse/gpio_mouse.c
drivers/input/mouse/hgpk.c
drivers/input/mouse/logips2pp.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/pxa930_trkball.c
drivers/input/mouse/sentelic.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/mouse/trackpoint.c
drivers/input/serio/altera_ps2.c
drivers/input/serio/at32psif.c
drivers/input/serio/i8042.c
drivers/input/serio/rpckbd.c
drivers/input/serio/xilinx_ps2.c
drivers/input/tablet/aiptek.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/tablet/wacom_wac.h
drivers/input/touchscreen/88pm860x-ts.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/ad7877.c
drivers/input/touchscreen/ad7879-i2c.c
drivers/input/touchscreen/ad7879-spi.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/ad7879.h
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/atmel-wm97xx.c
drivers/input/touchscreen/atmel_tsadcc.c
drivers/input/touchscreen/auo-pixcir-ts.c [new file with mode: 0644]
drivers/input/touchscreen/da9034-ts.c
drivers/input/touchscreen/egalax_ts.c [new file with mode: 0644]
drivers/input/touchscreen/htcpen.c
drivers/input/touchscreen/intel-mid-touch.c
drivers/input/touchscreen/jornada720_ts.c
drivers/input/touchscreen/lpc32xx_ts.c
drivers/input/touchscreen/mainstone-wm97xx.c
drivers/input/touchscreen/mc13783_ts.c
drivers/input/touchscreen/migor_ts.c
drivers/input/touchscreen/pcap_ts.c
drivers/input/touchscreen/pixcir_i2c_ts.c [new file with mode: 0644]
drivers/input/touchscreen/s3c2410_ts.c
drivers/input/touchscreen/stmpe-ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/tps6507x-ts.c
drivers/input/touchscreen/ucb1400_ts.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/input/touchscreen/w90p910_ts.c
drivers/input/touchscreen/wm831x-ts.c
drivers/input/touchscreen/zylonite-wm97xx.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_proto.h
drivers/iommu/amd_iommu_types.h
drivers/iommu/amd_iommu_v2.c [new file with mode: 0644]
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/msm_iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iovmm.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/leds-88pm860x.c
drivers/leds/leds-adp5520.c
drivers/leds/leds-ams-delta.c
drivers/leds/leds-asic3.c
drivers/leds/leds-atmel-pwm.c
drivers/leds/leds-bd2802.c
drivers/leds/leds-cobalt-qube.c
drivers/leds/leds-da903x.c
drivers/leds/leds-dac124s085.c
drivers/leds/leds-fsg.c
drivers/leds/leds-gpio.c
drivers/leds/leds-hp6xx.c
drivers/leds/leds-lm3530.c
drivers/leds/leds-lp3944.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/leds/leds-lt3593.c
drivers/leds/leds-mc13783.c
drivers/leds/leds-netxbig.c
drivers/leds/leds-ns2.c
drivers/leds/leds-pca9532.c
drivers/leds/leds-pca955x.c
drivers/leds/leds-pwm.c
drivers/leds/leds-rb532.c
drivers/leds/leds-regulator.c
drivers/leds/leds-renesas-tpu.c
drivers/leds/leds-s3c24xx.c
drivers/leds/leds-tca6507.c [new file with mode: 0644]
drivers/leds/leds-wm831x-status.c
drivers/leds/leds-wm8350.c
drivers/media/video/omap3isp/isp.c
drivers/media/video/omap3isp/isp.h
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/media/video/omap3isp/ispvideo.c
drivers/message/fusion/lsi/mpi_cnfg.h
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptsas.c
drivers/mtd/Kconfig
drivers/mtd/Makefile
drivers/mtd/afs.c
drivers/mtd/ar7part.c
drivers/mtd/bcm63xxpart.c [new file with mode: 0644]
drivers/mtd/chips/cfi_cmdset_0020.c
drivers/mtd/devices/Kconfig
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/doc2000.c
drivers/mtd/devices/doc2001.c
drivers/mtd/devices/doc2001plus.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/docg3.h
drivers/mtd/devices/docprobe.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/devices/sst25l.c
drivers/mtd/ftl.c
drivers/mtd/inftlcore.c
drivers/mtd/inftlmount.c
drivers/mtd/lpddr/lpddr_cmds.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/Makefile
drivers/mtd/maps/bcm963xx-flash.c [deleted file]
drivers/mtd/maps/bfin-async-flash.c
drivers/mtd/maps/gpio-addr-flash.c
drivers/mtd/maps/ixp2000.c
drivers/mtd/maps/ixp4xx.c
drivers/mtd/maps/lantiq-flash.c
drivers/mtd/maps/latch-addr-flash.c
drivers/mtd/maps/physmap.c
drivers/mtd/maps/physmap_of.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/maps/rbtx4939-flash.c
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/maps/scb2_flash.c
drivers/mtd/maps/sun_uflash.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/mtdblock.c
drivers/mtd/mtdblock_ro.c
drivers/mtd/mtdchar.c
drivers/mtd/mtdconcat.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdoops.c
drivers/mtd/mtdpart.c
drivers/mtd/mtdswap.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/ams-delta.c
drivers/mtd/nand/bcm_umi_nand.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_upm.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/ndfc.c
drivers/mtd/nand/nomadik_nand.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/pasemi_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/sharpsl.c
drivers/mtd/nand/sm_common.c
drivers/mtd/nand/socrates_nand.c
drivers/mtd/nand/tmio_nand.c
drivers/mtd/nand/txx9ndfmc.c
drivers/mtd/nftlcore.c
drivers/mtd/nftlmount.c
drivers/mtd/onenand/generic.c
drivers/mtd/onenand/onenand_base.c
drivers/mtd/onenand/samsung.c
drivers/mtd/redboot.c
drivers/mtd/rfd_ftl.c
drivers/mtd/sm_ftl.c
drivers/mtd/ssfdc.c
drivers/mtd/tests/mtd_oobtest.c
drivers/mtd/tests/mtd_pagetest.c
drivers/mtd/tests/mtd_readtest.c
drivers/mtd/tests/mtd_speedtest.c
drivers/mtd/tests/mtd_stresstest.c
drivers/mtd/tests/mtd_subpagetest.c
drivers/mtd/tests/mtd_torturetest.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/kapi.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/wl.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/fdt.c
drivers/pci/ats.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/msi.c
drivers/pci/pci-acpi.c
drivers/pci/pcie/aspm.c
drivers/pci/xen-pcifront.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/core.c
drivers/pinctrl/core.h
drivers/pinctrl/pinconf.c [new file with mode: 0644]
drivers/pinctrl/pinconf.h [new file with mode: 0644]
drivers/pinctrl/pinctrl-coh901.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-sirf.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-u300.c [new file with mode: 0644]
drivers/pinctrl/pinmux-sirf.c [deleted file]
drivers/pinctrl/pinmux-u300.c [deleted file]
drivers/pinctrl/pinmux.c
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/aat2870-regulator.c
drivers/regulator/ab3100.c
drivers/regulator/ab8500.c
drivers/regulator/ad5398.c
drivers/regulator/bq24022.c
drivers/regulator/core.c
drivers/regulator/da903x.c
drivers/regulator/da9052-regulator.c [new file with mode: 0644]
drivers/regulator/db8500-prcmu.c
drivers/regulator/dummy.c
drivers/regulator/fixed.c
drivers/regulator/gpio-regulator.c
drivers/regulator/isl6271a-regulator.c
drivers/regulator/lp3971.c
drivers/regulator/lp3972.c
drivers/regulator/max1586.c
drivers/regulator/max8649.c
drivers/regulator/max8660.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8952.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/mc13783-regulator.c
drivers/regulator/mc13892-regulator.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/mc13xxx.h
drivers/regulator/of_regulator.c [new file with mode: 0644]
drivers/regulator/pcap-regulator.c
drivers/regulator/pcf50633-regulator.c
drivers/regulator/tps6105x-regulator.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps6524x-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/tps65912-regulator.c
drivers/regulator/twl-regulator.c
drivers/regulator/userspace-consumer.c
drivers/regulator/virtual.c
drivers/regulator/wm831x-dcdc.c
drivers/regulator/wm831x-isink.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/regulator/wm8400-regulator.c
drivers/regulator/wm8994-regulator.c
drivers/rtc/interface.c
drivers/rtc/rtc-88pm860x.c
drivers/rtc/rtc-ab8500.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-bq4802.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-dm355evm.c
drivers/rtc/rtc-ds1286.c
drivers/rtc/rtc-ds1511.c
drivers/rtc/rtc-ds1553.c
drivers/rtc/rtc-ds1742.c
drivers/rtc/rtc-jz4740.c
drivers/rtc/rtc-lpc32xx.c
drivers/rtc/rtc-m41t93.c
drivers/rtc/rtc-m41t94.c
drivers/rtc/rtc-m48t35.c
drivers/rtc/rtc-m48t59.c
drivers/rtc/rtc-m48t86.c
drivers/rtc/rtc-max6902.c
drivers/rtc/rtc-max8925.c
drivers/rtc/rtc-max8998.c
drivers/rtc/rtc-mc13xxx.c
drivers/rtc/rtc-mpc5121.c
drivers/rtc/rtc-mrst.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-pcf2123.c
drivers/rtc/rtc-pcf50633.c
drivers/rtc/rtc-pm8xxx.c
drivers/rtc/rtc-puv3.c
drivers/rtc/rtc-rs5c348.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-sa1100.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-stk17ta8.c
drivers/rtc/rtc-stmp3xxx.c
drivers/rtc/rtc-twl.c
drivers/rtc/rtc-v3020.c
drivers/rtc/rtc-vr41xx.c
drivers/rtc/rtc-vt8500.c
drivers/rtc/rtc-wm831x.c
drivers/rtc/rtc-wm8350.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/bfa/bfa_defs.h
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/device_handler/scsi_dh_emc.c
drivers/scsi/device_handler/scsi_dh_hp_sw.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_bsg.h
drivers/scsi/lpfc/lpfc_compat.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mac_scsi.c
drivers/scsi/mpt2sas/mpi/mpi2.h
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
drivers/scsi/mpt2sas/mpi/mpi2_raid.h
drivers/scsi/mpt2sas/mpi/mpi2_tool.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla4xxx/ql4_dbg.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/staging/gma500/accel_2d.c
drivers/staging/gma500/cdv_intel_display.c
drivers/staging/gma500/framebuffer.c
drivers/staging/gma500/mdfld_intel_display.c
drivers/staging/gma500/mrst_crtc.c
drivers/staging/gma500/psb_drv.c
drivers/staging/gma500/psb_intel_display.c
drivers/usb/misc/usbled.c
drivers/video/backlight/88pm860x_bl.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/adp5520_bl.c
drivers/video/backlight/adx_bl.c [deleted file]
drivers/video/backlight/backlight.c
drivers/video/backlight/da903x_bl.c
drivers/video/backlight/ep93xx_bl.c
drivers/video/backlight/generic_bl.c
drivers/video/backlight/jornada720_bl.c
drivers/video/backlight/jornada720_lcd.c
drivers/video/backlight/lcd.c
drivers/video/backlight/ld9040.c
drivers/video/backlight/max8925_bl.c
drivers/video/backlight/omap1_bl.c
drivers/video/backlight/pcf50633-backlight.c
drivers/video/backlight/platform_lcd.c
drivers/video/backlight/pwm_bl.c
drivers/video/backlight/wm831x_bl.c
drivers/video/xen-fbfront.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/ar7_wdt.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/bcm63xx_wdt.c
drivers/watchdog/cpu5wdt.c
drivers/watchdog/cpwd.c
drivers/watchdog/davinci_wdt.c
drivers/watchdog/dw_wdt.c
drivers/watchdog/eurotechwdt.c
drivers/watchdog/ibmasr.c
drivers/watchdog/indydog.c
drivers/watchdog/iop_wdt.c
drivers/watchdog/ixp2000_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/jz4740_wdt.c
drivers/watchdog/ks8695_wdt.c
drivers/watchdog/lantiq_wdt.c
drivers/watchdog/max63xx_wdt.c
drivers/watchdog/mtx-1_wdt.c
drivers/watchdog/nuc900_wdt.c
drivers/watchdog/of_xilinx_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/pnx4008_wdt.c
drivers/watchdog/rc32434_wdt.c
drivers/watchdog/rdc321x_wdt.c
drivers/watchdog/riowd.c
drivers/watchdog/s3c2410_wdt.c
drivers/watchdog/stmp3xxx_wdt.c
drivers/watchdog/ts72xx_wdt.c
drivers/watchdog/twl4030_wdt.c
drivers/watchdog/via_wdt.c [new file with mode: 0644]
drivers/watchdog/wm831x_wdt.c
drivers/watchdog/wm8350_wdt.c
drivers/xen/Kconfig
drivers/xen/Makefile
drivers/xen/events.c
drivers/xen/evtchn.c
drivers/xen/gntalloc.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/privcmd.c [new file with mode: 0644]
drivers/xen/privcmd.h [new file with mode: 0644]
drivers/xen/swiotlb-xen.c
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xen-pciback/xenbus.c
drivers/xen/xenbus/Makefile
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_comms.h
drivers/xen/xenbus/xenbus_dev_backend.c [new file with mode: 0644]
drivers/xen/xenbus/xenbus_dev_frontend.c [new file with mode: 0644]
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe.h
drivers/xen/xenbus/xenbus_probe_backend.c
drivers/xen/xenbus/xenbus_probe_frontend.c
drivers/xen/xenbus/xenbus_xs.c
drivers/xen/xenfs/Makefile
drivers/xen/xenfs/privcmd.c [deleted file]
drivers/xen/xenfs/super.c
drivers/xen/xenfs/xenbus.c [deleted file]
drivers/xen/xenfs/xenfs.h
fs/9p/cache.c
fs/9p/fid.c
fs/9p/v9fs.c
fs/9p/vfs_addr.c
fs/9p/vfs_dentry.c
fs/9p/vfs_dir.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/9p/xattr.c
fs/Kconfig.binfmt
fs/binfmt_elf.c
fs/btrfs/file.c
fs/dlm/config.c
fs/dlm/config.h
fs/dlm/debug_fs.c
fs/dlm/dir.c
fs/dlm/dlm_internal.h
fs/dlm/lock.c
fs/dlm/lockspace.c
fs/dlm/member.c
fs/dlm/member.h
fs/dlm/rcom.c
fs/dlm/rcom.h
fs/dlm/recover.c
fs/dlm/recoverd.c
fs/dlm/user.c
fs/exec.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr_security.c
fs/fs-writeback.c
fs/gfs2/lock_dlm.c
fs/inode.c
fs/jbd2/commit.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/erase.c
fs/jffs2/fs.c
fs/jffs2/readinode.c
fs/jffs2/scan.c
fs/jffs2/super.c
fs/jffs2/wbuf.c
fs/jffs2/writev.c
fs/logfs/dev_mtd.c
fs/nfs/callback_proc.c
fs/nfs/client.c
fs/nfs/file.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4_fs.h
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/objlayout/objlayout.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/nfs4callback.c
fs/ocfs2/stack_user.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/root.c
fs/reiserfs/bitmap.c
fs/reiserfs/journal.c
fs/reiserfs/super.c
fs/romfs/mmap-nommu.c
fs/ubifs/lpt.c
fs/ubifs/tnc.c
fs/ubifs/xattr.c
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/page.h
include/asm-generic/pci_iomap.h [new file with mode: 0644]
include/asm-generic/uaccess.h
include/drm/Kbuild
include/drm/drm.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_fourcc.h [new file with mode: 0644]
include/drm/drm_mode.h
include/drm/drm_sman.h [deleted file]
include/drm/exynos_drm.h
include/drm/gma_drm.h [new file with mode: 0644]
include/drm/i915_drm.h
include/drm/radeon_drm.h
include/drm/sis_drm.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h
include/drm/via_drm.h
include/linux/acpi.h
include/linux/ahci_platform.h
include/linux/amd-iommu.h
include/linux/compiler-gcc4.h
include/linux/compiler.h
include/linux/dlm.h
include/linux/elf-em.h
include/linux/gfp.h
include/linux/hid.h
include/linux/input/auo-pixcir-ts.h [new file with mode: 0644]
include/linux/input/gp2ap002a00f.h [new file with mode: 0644]
include/linux/input/gpio_tilt.h [new file with mode: 0644]
include/linux/input/pixcir_ts.h [new file with mode: 0644]
include/linux/input/samsung-keypad.h [new file with mode: 0644]
include/linux/input/tca8418_keypad.h [new file with mode: 0644]
include/linux/iommu.h
include/linux/jbd2.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/kvm_para.h
include/linux/leds-tca6507.h [new file with mode: 0644]
include/linux/mempolicy.h
include/linux/mfd/mc13xxx.h
include/linux/mfd/tps65910.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/msi.h
include/linux/mtd/cfi.h
include/linux/mtd/cfi_endian.h
include/linux/mtd/map.h
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/mtd/physmap.h
include/linux/nfs_fs_sb.h
include/linux/nfs_idmap.h
include/linux/nfs_xdr.h
include/linux/page-debug-flags.h
include/linux/pagevec.h
include/linux/pci-aspm.h
include/linux/pci.h
include/linux/pci_regs.h
include/linux/pid_namespace.h
include/linux/pinctrl/machine.h
include/linux/pinctrl/pinconf.h [new file with mode: 0644]
include/linux/pinctrl/pinctrl.h
include/linux/pinctrl/pinmux.h
include/linux/proc_fs.h
include/linux/regulator/consumer.h
include/linux/regulator/driver.h
include/linux/regulator/of_regulator.h [new file with mode: 0644]
include/linux/rmap.h
include/linux/sched.h
include/linux/signal.h
include/linux/sunrpc/auth.h
include/linux/sunrpc/auth_gss.h
include/linux/sunrpc/xdr.h
include/linux/swap.h
include/linux/swiotlb.h
include/linux/ucb1400.h
include/linux/watchdog.h
include/linux/workqueue.h
include/linux/writeback.h
include/mtd/mtd-abi.h
include/net/9p/9p.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h
include/scsi/scsi_transport_iscsi.h
include/trace/events/ext4.h
include/trace/events/kmem.h
include/trace/events/oom.h [new file with mode: 0644]
include/trace/events/task.h [new file with mode: 0644]
include/trace/events/writeback.h
include/xen/events.h
include/xen/grant_table.h
include/xen/interface/grant_table.h
include/xen/interface/io/xs_wire.h
include/xen/interface/xen.h
include/xen/xenbus.h
include/xen/xenbus_dev.h [new file with mode: 0644]
init/do_mounts.c
ipc/mqueue.c
kernel/exit.c
kernel/fork.c
kernel/jump_label.c
kernel/power/snapshot.c
kernel/signal.c
kernel/workqueue.c
lib/Kconfig
lib/Makefile
lib/btree.c
lib/crc32.c
lib/iomap.c
lib/pci_iomap.c [new file with mode: 0644]
lib/swiotlb.c
mm/Kconfig.debug
mm/bootmem.c
mm/compaction.c
mm/fadvise.c
mm/filemap.c
mm/hugetlb.c
mm/mempolicy.c
mm/mempool.c
mm/migrate.c
mm/mmap.c
mm/mremap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/rmap.c
mm/slub.c
mm/swap.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
net/9p/client.c
net/9p/error.c
net/9p/mod.c
net/9p/protocol.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/9p/util.c
net/sunrpc/auth_generic.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/xdr.c
scripts/checkpatch.pl
scripts/get_maintainer.pl
sound/soc/codecs/sgtl5000.c
tools/perf/Documentation/examples.txt
virt/kvm/coalesced_mmio.c
virt/kvm/ioapic.c
virt/kvm/iommu.c
virt/kvm/kvm_main.c

diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
new file mode 100644 (file)
index 0000000..3d5951c
--- /dev/null
@@ -0,0 +1,75 @@
+What:          /sys/bus/xen-backend/devices/*/devtype
+Date:          Feb 2009
+KernelVersion: 2.6.38
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                The type of the device.  e.g., one of: 'vbd' (block),
+                'vif' (network), or 'vfb' (framebuffer).
+
+What:          /sys/bus/xen-backend/devices/*/nodename
+Date:          Feb 2009
+KernelVersion: 2.6.38
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                XenStore node (under /local/domain/NNN/) for this
+                backend device.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/physical_device
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                The major:minor number (in hexadecimal) of the
+                physical device providing the storage for this backend
+                block device.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/mode
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Whether the block device is read-only ('r') or
+                read-write ('w').
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/f_req
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of flush requests from the frontend.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/oo_req
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of requests delayed because the backend was too
+                busy processing previous requests.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/rd_req
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of read requests from the frontend.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/rd_sect
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of sectors read by the frontend.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/wr_req
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of write requests from the frontend.
+
+What:          /sys/bus/xen-backend/devices/vbd-*/statistics/wr_sect
+Date:          April 2011
+KernelVersion: 3.0
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+                Number of sectors written by the frontend.
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
new file mode 100644 (file)
index 0000000..caa311d
--- /dev/null
@@ -0,0 +1,77 @@
+What:          /sys/devices/system/xen_memory/xen_memory0/max_retry_count
+Date:          May 2011
+KernelVersion: 2.6.39
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               The maximum number of times the balloon driver will
+               attempt to increase the balloon before giving up.  See
+               also 'retry_count' below.
+               A value of zero means retry forever and is the default one.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/max_schedule_delay
+Date:          May 2011
+KernelVersion: 2.6.39
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               The limit that 'schedule_delay' (see below) will be
+               increased to. The default value is 32 seconds.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/retry_count
+Date:          May 2011
+KernelVersion: 2.6.39
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               The current number of times that the balloon driver
+               has attempted to increase the size of the balloon.
+               The default value is one. With max_retry_count being
+               zero (unlimited), this means that the driver will attempt
+               to retry with a 'schedule_delay' delay.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/schedule_delay
+Date:          May 2011
+KernelVersion: 2.6.39
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               The time (in seconds) to wait between attempts to
+               increase the balloon.  Each time the balloon cannot be
+               increased, 'schedule_delay' is increased (until
+               'max_schedule_delay' is reached at which point it
+               will use the max value).
+
+What:          /sys/devices/system/xen_memory/xen_memory0/target
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               The target number of pages to adjust this domain's
+               memory reservation to.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/target_kb
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               As target above, except the value is in KiB.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/info/current_kb
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               Current size (in KiB) of this domain's memory
+               reservation.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/info/high_kb
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               Amount (in KiB) of high memory in the balloon.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/info/low_kb
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+               Amount (in KiB) of low (or normal) memory in the
+               balloon.
index 349ecf26ce108440a5bd0c9fe9414cfa984aed9d..34f51100f0299cb5a96a77984def99dbfdd9367d 100644 (file)
@@ -66,6 +66,24 @@ Description:
                re-discover previously removed devices.
                Depends on CONFIG_HOTPLUG.
 
+What:          /sys/bus/pci/devices/.../msi_irqs/
+Date:          September, 2011
+Contact:       Neil Horman <nhorman@tuxdriver.com>
+Description:
+               The /sys/devices/.../msi_irqs directory contains a variable set
+               of sub-directories, with each sub-directory being named after a
+               corresponding msi irq vector allocated to that device.  Each
+               numbered sub-directory N contains attributes of that irq.
+               Note that this directory is not created for device drivers which
+               do not support msi irqs
+
+What:          /sys/bus/pci/devices/.../msi_irqs/<N>/mode
+Date:          September 2011
+Contact:       Neil Horman <nhorman@tuxdriver.com>
+Description:
+               This attribute indicates the mode that the irq vector named by
+               the parent directory is in (msi vs. msix)
+
 What:          /sys/bus/pci/devices/.../remove
 Date:          January 2009
 Contact:       Linux PCI developers <linux-pci@vger.kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
new file mode 100644 (file)
index 0000000..4cf1e72
--- /dev/null
@@ -0,0 +1,12 @@
+What:           Attribute for calibrating ST-Ericsson AB8500 Real Time Clock
+Date:           Oct 2011
+KernelVersion:  3.0
+Contact:        Mark Godfrey <mark.godfrey@stericsson.com>
+Description:    The rtc_calibration attribute allows the userspace to
+                calibrate the AB8500's 32KHz Real Time Clock.
+                Every 60 seconds the AB8500 will correct the RTC's value
+                by adding to it the value of this attribute.
+                The range of the attribute is -127 to +127 in units of
+                30.5 micro-seconds (half-parts-per-million of the 32KHz clock)
+Users:          The /vendor/st-ericsson/base_utilities/core/rtc_calibration
+                daemon uses this interface.
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-docg3 b/Documentation/ABI/testing/sysfs-devices-platform-docg3
new file mode 100644 (file)
index 0000000..8aa3671
--- /dev/null
@@ -0,0 +1,34 @@
+What:          /sys/devices/platform/docg3/f[0-3]_dps[01]_is_keylocked
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+               Show whether the floor (0 to 3), protection area (0 or 1) is
+               keylocked. Each docg3 chip (or floor) has 2 protection areas,
+               which can cover any part of it, block aligned, called DPS.
+               The protection has information embedded whether it blocks reads,
+               writes or both.
+               The result is:
+               0 -> the DPS is not keylocked
+               1 -> the DPS is keylocked
+Users:         None identified so far.
+
+What:          /sys/devices/platform/docg3/f[0-3]_dps[01]_protection_key
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+               Enter the protection key for the floor (0 to 3), protection area
+               (0 or 1). Each docg3 chip (or floor) has 2 protection areas,
+               which can cover any part of it, block aligned, called DPS.
+               The protection has information embedded whether it blocks reads,
+               writes or both.
+               The protection key is a string of 8 bytes (value 0-255).
+               Entering the correct value toggles the lock, which can be
+               observed through f[0-3]_dps[01]_is_keylocked.
+               Possible values are:
+                       - 8 bytes
+               Typical values are:
+                       - "00000000"
+                       - "12345678"
+Users:         None identified so far.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-multitouch b/Documentation/ABI/testing/sysfs-driver-hid-multitouch
new file mode 100644 (file)
index 0000000..f79839d
--- /dev/null
@@ -0,0 +1,9 @@
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/quirks
+Date:          November 2011
+Contact:       Benjamin Tissoires <benjamin.tissoires@gmail.com>
+Description:   The integer value of this attribute corresponds to the
+               quirks actually in place to handle the device's protocol.
+               When read, this attribute returns the current settings (see
+               MT_QUIRKS_* in hid-multitouch.c).
+               When written, this attribute changes the quirks on the fly,
+               and thus the protocol used to handle the device.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku b/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
new file mode 100644 (file)
index 0000000..189dc43
--- /dev/null
@@ -0,0 +1,135 @@
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/actual_profile
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   The integer value of this attribute ranges from 0-4.
+               When read, this attribute returns the number of the actual
+               profile. This value is persistent, so it's equivalent to the
+               profile that's active when the device is powered on next time.
+               When written, this file sets the number of the startup profile
+               and the device activates this profile immediately.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/info
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When read, this file returns general data like firmware version.
+               The data is 6 bytes long.
+               This file is readonly.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/key_mask
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one deactivate certain keys like
+               windows and application keys, to prevent accidental presses.
+               Profile number for which these settings occur is included in
+               written data. The data has to be 6 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_capslock
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the
+               capslock key for a specific profile. Profile number is included
+               in written data. The data has to be 6 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_easyzone
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the
+               easyzone keys for a specific profile. Profile number is included
+               in written data. The data has to be 65 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_function
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the
+               function keys for a specific profile. Profile number is included
+               in written data. The data has to be 41 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_macro
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the macro
+               keys for a specific profile. Profile number is included in
+               written data. The data has to be 35 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_media
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the media
+               keys for a specific profile. Profile number is included in
+               written data. The data has to be 29 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_thumbster
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the function of the
+               thumbster keys for a specific profile. Profile number is included
+               in written data. The data has to be 23 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/last_set
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the time in secs since
+               epoch in which the last configuration took place.
+               The data has to be 20 bytes long.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/light
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one set the backlight intensity for
+               a specific profile. Profile number is included in written data.
+               The data has to be 10 bytes long.
+               Before reading this file, control has to be written to select
+               which profile to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/macro
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one store macros with max 500
+               keystrokes for a specific button for a specific profile.
+               Button and profile numbers are included in written data.
+               The data has to be 2083 bytes long.
+               Before reading this file, control has to be written to select
+               which profile and key to read.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/control
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one select which data from which
+               profile will be read next. The data has to be 3 bytes long.
+               This file is writeonly.
+Users:         http://roccat.sourceforge.net
+
+What:          /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/talk
+Date:          June 2011
+Contact:       Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:   When written, this file lets one trigger easyshift functionality
+               from the host.
+               The data has to be 16 bytes long.
+               This file is writeonly.
+Users:         http://roccat.sourceforge.net
index 5d5a16ea57c612fb87075658e618e27715149926..3d98009f447a67447956e36e585965403a33610c 100644 (file)
@@ -8,3 +8,15 @@ Contact:       David Herrmann <dh.herrmann@googlemail.com>
 Description:   Make it possible to set/get current led state. Reading from it
                returns 0 if led is off and 1 if it is on. Writing 0 to it
                disables the led, writing 1 enables it.
+
+What:          /sys/bus/hid/drivers/wiimote/<dev>/extension
+Date:          August 2011
+KernelVersion: 3.2
+Contact:       David Herrmann <dh.herrmann@googlemail.com>
+Description:   This file contains the currently connected and initialized
+               extensions. It can be one of: none, motionp, nunchuck, classic,
+               motionp+nunchuck, motionp+classic
+               motionp is the official Nintendo Motion+ extension, nunchuck is
+               the official Nintendo Nunchuck extension and classic is the
+               Nintendo Classic Controller extension. The motionp extension can
+               be combined with the other two.
index 82d4df136444fb4e8d1d273fe08cb12fb2d2c18b..0130d6683c14eecf01c3a9b6226aec0549487a81 100644 (file)
@@ -15,9 +15,9 @@ Contact:      linux-input@vger.kernel.org
 Description:
                Attribute group for control of the status LEDs and the OLEDs.
                This attribute group is only available for Intuos 4 M, L,
-               and XL (with LEDs and OLEDs) and Cintiq 21UX2 (LEDs only).
-               Therefore its presence implicitly signifies the presence of
-               said LEDs and OLEDs on the tablet device.
+               and XL (with LEDs and OLEDs) and Cintiq 21UX2 and Cintiq 24HD
+               (LEDs only). Therefore its presence implicitly signifies the
+               presence of said LEDs and OLEDs on the tablet device.
 
 What:          /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status0_luminance
 Date:          August 2011
@@ -41,16 +41,17 @@ Date:               August 2011
 Contact:       linux-input@vger.kernel.org
 Description:
                Writing to this file sets which one of the four (for Intuos 4)
-               or of the right four (for Cintiq 21UX2) status LEDs is active (0..3).
-               The other three LEDs on the same side are always inactive.
+               or of the right four (for Cintiq 21UX2 and Cintiq 24HD) status
+               LEDs is active (0..3). The other three LEDs on the same side are
+               always inactive.
 
 What:          /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status_led1_select
 Date:          September 2011
 Contact:       linux-input@vger.kernel.org
 Description:
-               Writing to this file sets which one of the left four (for Cintiq 21UX2)
-               status LEDs is active (0..3). The other three LEDs on the left are always
-               inactive.
+               Writing to this file sets which one of the left four (for Cintiq 21UX2
+               and Cintiq 24HD) status LEDs is active (0..3). The other three LEDs on
+               the left are always inactive.
 
 What:          /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/buttons_luminance
 Date:          August 2011
diff --git a/Documentation/devicetree/bindings/c6x/clocks.txt b/Documentation/devicetree/bindings/c6x/clocks.txt
new file mode 100644 (file)
index 0000000..a04f5fd
--- /dev/null
@@ -0,0 +1,40 @@
+C6X PLL Clock Controllers
+-------------------------
+
+This is a first-cut support for the SoC clock controllers. This is still
+under development and will probably change as the common device tree
+clock support is added to the kernel.
+
+Required properties:
+
+- compatible: "ti,c64x+pll"
+    May also have SoC-specific value to support SoC-specific initialization
+    in the driver. One of:
+        "ti,c6455-pll"
+        "ti,c6457-pll"
+        "ti,c6472-pll"
+        "ti,c6474-pll"
+
+- reg: base address and size of register area
+- clock-frequency: input clock frequency in hz
+
+
+Optional properties:
+
+- ti,c64x+pll-bypass-delay: CPU cycles to delay when entering bypass mode
+
+- ti,c64x+pll-reset-delay:  CPU cycles to delay after PLL reset
+
+- ti,c64x+pll-lock-delay:   CPU cycles to delay after PLL frequency change
+
+Example:
+
+       clock-controller@29a0000 {
+               compatible = "ti,c6472-pll", "ti,c64x+pll";
+               reg = <0x029a0000 0x200>;
+               clock-frequency = <25000000>;
+
+               ti,c64x+pll-bypass-delay = <200>;
+               ti,c64x+pll-reset-delay = <12000>;
+               ti,c64x+pll-lock-delay = <80000>;
+       };
diff --git a/Documentation/devicetree/bindings/c6x/dscr.txt b/Documentation/devicetree/bindings/c6x/dscr.txt
new file mode 100644 (file)
index 0000000..d847758
--- /dev/null
@@ -0,0 +1,127 @@
+Device State Configuration Registers
+------------------------------------
+
+TI C6X SoCs contain a region of miscellaneous registers which provide various
+function for SoC control or status. Details vary considerably from SoC
+to SoC, with no two being alike.
+
+In general, the Device State Configuration Registers (DSCR) will provide one or
+more configuration registers often protected by a lock register where one or
+more key values must be written to a lock register in order to unlock the
+configuration register for writes. These configuration registers may be used to
+enable (and disable in some cases) SoC pin drivers, select peripheral clock
+sources (internal or pin), etc. In some cases, a configuration register is
+write once or the individual bits are write once. In addition to device config,
+the DSCR block may provide registers which are used to reset peripherals,
+provide device ID information, provide ethernet MAC addresses, as well as other
+miscellaneous functions.
+
+For device state control (enable/disable), each device control is assigned an
+id which is used by individual device drivers to control the state as needed.
+
+Required properties:
+
+- compatible: must be "ti,c64x+dscr"
+- reg: register area base and size
+
+Optional properties:
+
+  NOTE: These are optional in that not all SoCs will have all properties. For
+        SoCs which do support a given property, leaving the property out of the
+        device tree will result in reduced functionality or possibly driver
+        failure.
+
+- ti,dscr-devstat
+    offset of the devstat register
+
+- ti,dscr-silicon-rev
+    offset, start bit, and bitsize of silicon revision field
+
+- ti,dscr-rmii-resets
+    offset and bitmask of RMII reset field. May have multiple tuples if more
+    than one ethernet port is available.
+
+- ti,dscr-locked-regs
+    possibly multiple tuples describing registers which are write protected by
+    a lock register. Each tuple consists of the register offset, lock register
+    offset, and the key value used to unlock the register.
+
+- ti,dscr-kick-regs
+    offset and key values of two "kick" registers used to write protect other
+    registers in DSCR. On SoCs using kick registers, the first key must be
+    written to the first kick register and the second key must be written to
+    the second register before other registers in the area are write-enabled.
+
+- ti,dscr-mac-fuse-regs
+    MAC addresses are contained in two registers. Each element of a MAC address
+    is contained in a single byte. This property has two tuples. Each tuple has
+    a register offset and four cells representing bytes in the register from
+    most significant to least. The value of these four cells is the MAC byte
+    index (1-6) of the byte within the register. A value of 0 means the byte
+    is unused in the MAC address.
+
+- ti,dscr-devstate-ctl-regs
+    This property describes the bitfields used to control the state of devices.
+    Each tuple describes a range of identical bitfields used to control one or
+    more devices (one bitfield per device). The layout of each tuple is:
+
+        start_id num_ids reg enable disable start_bit nbits
+
+    Where:
+        start_id is device id for the first device control in the range
+        num_ids is the number of device controls in the range
+        reg is the offset of the register holding the control bits
+        enable is the value to enable a device
+        disable is the value to disable a device (0xffffffff if cannot disable)
+        start_bit is the bit number of the first bit in the range
+        nbits is the number of bits per device control
+
+- ti,dscr-devstate-stat-regs
+    This property describes the bitfields used to provide device state status
+    for device states controlled by the DSCR. Each tuple describes a range of
+    identical bitfields used to provide status for one or more devices (one
+    bitfield per device). The layout of each tuple is:
+
+        start_id num_ids reg enable disable start_bit nbits
+
+    Where:
+        start_id is device id for the first device status in the range
+        num_ids is the number of devices covered by the range
+        reg is the offset of the register holding the status bits
+        enable is the value indicating device is enabled
+        disable is the value indicating device is disabled
+        start_bit is the bit number of the first bit in the range
+        nbits is the number of bits per device status
+
+- ti,dscr-privperm
+    Offset and default value for register used to set access privilege for
+    some SoC devices.
+
+
+Example:
+
+       device-state-config-regs@2a80000 {
+               compatible = "ti,c64x+dscr";
+               reg = <0x02a80000 0x41000>;
+
+               ti,dscr-devstat = <0>;
+               ti,dscr-silicon-rev = <8 28 0xf>;
+               ti,dscr-rmii-resets = <0x40020 0x00040000>;
+
+               ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
+               ti,dscr-devstate-ctl-regs =
+                        <0 12 0x40008 1 0  0  2
+                         12 1 0x40008 3 0 30  2
+                         13 2 0x4002c 1 0xffffffff 0 1>;
+               ti,dscr-devstate-stat-regs =
+                       <0 10 0x40014 1 0  0  3
+                        10 2 0x40018 1 0  0  3>;
+
+               ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
+                                        0x704 5 6 0 0>;
+
+               ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
+
+               ti,dscr-kick-regs = <0x38 0x83E70B13
+                                    0x3c 0x95A4F1E0>;
+       };
diff --git a/Documentation/devicetree/bindings/c6x/emifa.txt b/Documentation/devicetree/bindings/c6x/emifa.txt
new file mode 100644 (file)
index 0000000..0ff6e9b
--- /dev/null
@@ -0,0 +1,62 @@
+External Memory Interface
+-------------------------
+
+The emifa node describes a simple external bus controller found on some C6X
+SoCs. This interface provides external busses with a number of chip selects.
+
+Required properties:
+
+- compatible: must be "ti,c64x+emifa", "simple-bus"
+- reg: register area base and size
+- #address-cells: must be 2 (chip-select + offset)
+- #size-cells: must be 1
+- ranges: mapping from EMIFA space to parent space
+
+
+Optional properties:
+
+- ti,dscr-dev-enable: Device ID if EMIF is enabled/disabled from DSCR
+
+- ti,emifa-burst-priority:
+      Number of memory transfers after which the EMIF will elevate the priority
+      of the oldest command in the command FIFO. Setting this field to 255
+      disables this feature, thereby allowing old commands to stay in the FIFO
+      indefinitely.
+
+- ti,emifa-ce-config:
+      Configuration values for each of the supported chip selects.
+
+Example:
+
+       emifa@70000000 {
+               compatible = "ti,c64x+emifa", "simple-bus";
+               #address-cells = <2>;
+               #size-cells = <1>;
+               reg = <0x70000000 0x100>;
+               ranges = <0x2 0x0 0xa0000000 0x00000008
+                         0x3 0x0 0xb0000000 0x00400000
+                         0x4 0x0 0xc0000000 0x10000000
+                         0x5 0x0 0xD0000000 0x10000000>;
+
+               ti,dscr-dev-enable = <13>;
+               ti,emifa-burst-priority = <255>;
+               ti,emifa-ce-config = <0x00240120
+                                     0x00240120
+                                     0x00240122
+                                     0x00240122>;
+
+               flash@3,0 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x3 0x0 0x400000>;
+                       bank-width = <1>;
+                       device-width = <1>;
+                       partition@0 {
+                               reg = <0x0 0x400000>;
+                               label = "NOR";
+                       };
+               };
+       };
+
+This shows a flash chip attached to chip select 3.
diff --git a/Documentation/devicetree/bindings/c6x/interrupt.txt b/Documentation/devicetree/bindings/c6x/interrupt.txt
new file mode 100644 (file)
index 0000000..42bb796
--- /dev/null
@@ -0,0 +1,104 @@
+C6X Interrupt Chips
+-------------------
+
+* C64X+ Core Interrupt Controller
+
+  The core interrupt controller provides 16 prioritized interrupts to the
+  C64X+ core. Priority 0 and 1 are used for reset and NMI respectively.
+  Priority 2 and 3 are reserved. Priority 4-15 are used for interrupt
+  sources coming from outside the core.
+
+  Required properties:
+  --------------------
+  - compatible: Should be "ti,c64x+core-pic";
+  - #interrupt-cells: <1>
+
+  Interrupt Specifier Definition
+  ------------------------------
+  Single cell specifying the core interrupt priority level (4-15) where
+  4 is highest priority and 15 is lowest priority.
+
+  Example
+  -------
+  core_pic: interrupt-controller@0 {
+       interrupt-controller;
+       #interrupt-cells = <1>;
+       compatible = "ti,c64x+core-pic";
+  };
+
+
+
+* C64x+ Megamodule Interrupt Controller
+
+  The megamodule PIC consists of four interrupt mupliplexers each of which
+  combine up to 32 interrupt inputs into a single interrupt output which
+  may be cascaded into the core interrupt controller. The megamodule PIC
+  has a total of 12 outputs cascading into the core interrupt controller.
+  One for each core interrupt priority level. In addition to the combined
+  interrupt sources, individual megamodule interrupts may be cascaded to
+  the core interrupt controller. When an individual interrupt is cascaded,
+  it is no longer handled through a megamodule interrupt combiner and is
+  considered to have the core interrupt controller as the parent.
+
+  Required properties:
+  --------------------
+  - compatible: "ti,c64x+megamod-pic"
+  - interrupt-controller
+  - #interrupt-cells: <1>
+  - reg: base address and size of register area
+  - interrupt-parent: must be core interrupt controller
+  - interrupts: This should have four cells; one for each interrupt combiner.
+                The cells contain the core priority interrupt to which the
+                corresponding combiner output is wired.
+
+  Optional properties:
+  --------------------
+  - ti,c64x+megamod-pic-mux: Array of 12 cells corresponding to the 12 core
+                             priority interrupts. The first cell corresponds to
+                             core priority 4 and the last cell corresponds to
+                             core priority 15. The value of each cell is the
+                             megamodule interrupt source which is MUXed to
+                             the core interrupt corresponding to the cell
+                             position. Allowed values are 4 - 127. Mapping for
+                             interrupts 0 - 3 (combined interrupt sources) are
+                             ignored.
+
+  Interrupt Specifier Definition
+  ------------------------------
+  Single cell specifying the megamodule interrupt source (4-127). Note that
+  interrupts mapped directly to the core with "ti,c64x+megamod-pic-mux" will
+  use the core interrupt controller as their parent and the specifier will
+  be the core priority level, not the megamodule interrupt number.
+
+  Examples
+  --------
+  megamod_pic: interrupt-controller@1800000 {
+       compatible = "ti,c64x+megamod-pic";
+       interrupt-controller;
+       #interrupt-cells = <1>;
+       reg = <0x1800000 0x1000>;
+       interrupt-parent = <&core_pic>;
+       interrupts = < 12 13 14 15 >;
+  };
+
+  This is a minimal example where all individual interrupts go through a
+  combiner. Combiner-0 is mapped to core interrupt 12, combiner-1 is mapped
+  to interrupt 13, etc.
+
+
+  megamod_pic: interrupt-controller@1800000 {
+       compatible = "ti,c64x+megamod-pic";
+       interrupt-controller;
+       #interrupt-cells = <1>;
+       reg = <0x1800000 0x1000>;
+       interrupt-parent = <&core_pic>;
+       interrupts = < 12 13 14 15 >;
+       ti,c64x+megamod-pic-mux = <  0  0  0  0
+                                    32  0  0  0
+                                     0  0  0  0 >;
+  };
+
+  This is the same as the first example except that megamodule interrupt 32 is
+  mapped directly to core priority interrupt 8. The node using this interrupt
+  must set the core controller as its interrupt parent and use 8 in the
+  interrupt specifier value.
diff --git a/Documentation/devicetree/bindings/c6x/soc.txt b/Documentation/devicetree/bindings/c6x/soc.txt
new file mode 100644 (file)
index 0000000..b1e4973
--- /dev/null
@@ -0,0 +1,28 @@
+C6X System-on-Chip
+------------------
+
+Required properties:
+
+- compatible: "simple-bus"
+- #address-cells: must be 1
+- #size-cells: must be 1
+- ranges
+
+Optional properties:
+
+- model: specific SoC model
+
+- nodes for IP blocks within SoC
+
+
+Example:
+
+       soc {
+               compatible = "simple-bus";
+               model = "tms320c6455";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/c6x/timer64.txt b/Documentation/devicetree/bindings/c6x/timer64.txt
new file mode 100644 (file)
index 0000000..95911fe
--- /dev/null
@@ -0,0 +1,26 @@
+Timer64
+-------
+
+The timer64 node describes C6X event timers.
+
+Required properties:
+
+- compatible: must be "ti,c64x+timer64"
+- reg: base address and size of register region
+- interrupt-parent: interrupt controller
+- interrupts: interrupt id
+
+Optional properties:
+
+- ti,dscr-dev-enable: Device ID used to enable timer IP through DSCR interface.
+
+- ti,core-mask: on multi-core SoCs, bitmask of cores allowed to use this timer.
+
+Example:
+       timer0: timer@25e0000 {
+               compatible = "ti,c64x+timer64";
+               ti,core-mask = < 0x01 >;
+               reg = <0x25e0000 0x40>;
+               interrupt-parent = <&megamod_pic>;
+               interrupts = < 16 >;
+       };
diff --git a/Documentation/devicetree/bindings/input/tegra-kbc.txt b/Documentation/devicetree/bindings/input/tegra-kbc.txt
new file mode 100644 (file)
index 0000000..5ecfa99
--- /dev/null
@@ -0,0 +1,18 @@
+* Tegra keyboard controller
+
+Required properties:
+- compatible: "nvidia,tegra20-kbc"
+
+Optional properties:
+- debounce-delay: delay in milliseconds per row scan for debouncing
+- repeat-delay: delay in milliseconds before repeat starts
+- ghost-filter: enable ghost filtering for this device
+- wakeup-source: configure keyboard as a wakeup source for suspend/resume
+
+Example:
+
+keyboard: keyboard {
+       compatible = "nvidia,tegra20-kbc";
+       reg = <0x7000e200 0x100>;
+       ghost-filter;
+};
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
new file mode 100644 (file)
index 0000000..719f4dc
--- /dev/null
@@ -0,0 +1,44 @@
+GPIO assisted NAND flash
+
+The GPIO assisted NAND flash uses a memory mapped interface to
+read/write the NAND commands and data and GPIO pins for the control
+signals.
+
+Required properties:
+- compatible : "gpio-control-nand"
+- reg : should specify localbus chip select and size used for the chip.  The
+  resource describes the data bus connected to the NAND flash and all accesses
+  are made in native endianness.
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+  representing partitions.
+- gpios : specifies the gpio pins to control the NAND device.  nwp is an
+  optional gpio and may be set to 0 if not present.
+
+Optional properties:
+- bank-width : Width (in bytes) of the device.  If not present, the width
+  defaults to 1 byte.
+- chip-delay : chip dependent delay for transferring data from array to
+  read registers (tR).  If not present then a default of 20us is used.
+- gpio-control-nand,io-sync-reg : A 64-bit physical address for a read
+  location used to guard against bus reordering with regards to accesses to
+  the GPIO's and the NAND flash data bus.  If present, then after changing
+  GPIO state and before and after command byte writes, this register will be
+  read to ensure that the GPIO accesses have completed.
+
+Examples:
+
+gpio-nand@1,0 {
+       compatible = "gpio-control-nand";
+       reg = <1 0x0000 0x2>;
+       #address-cells = <1>;
+       #size-cells = <1>;
+       gpios = <&banka 1 0     /* rdy */
+                &banka 2 0     /* nce */
+                &banka 3 0     /* ale */
+                &banka 4 0     /* cle */
+                0              /* nwp */>;
+
+       partition@0 {
+       ...
+       };
+};
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
new file mode 100644 (file)
index 0000000..9cf57fd
--- /dev/null
@@ -0,0 +1,29 @@
+Fixed Voltage regulators
+
+Required properties:
+- compatible: Must be "regulator-fixed";
+
+Optional properties:
+- gpio: gpio to use for enable control
+- startup-delay-us: startup time in microseconds
+- enable-active-high: Polarity of GPIO is Active high
+If this property is missing, the default assumed is Active low.
+
+Any property defined as part of the core regulator
+binding, defined in regulator.txt, can also be used.
+However a fixed voltage regulator is expected to have the
+regulator-min-microvolt and regulator-max-microvolt
+to be the same.
+
+Example:
+
+       abc: fixedregulator@0 {
+               compatible = "regulator-fixed";
+               regulator-name = "fixed-supply";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               gpio = <&gpio1 16 0>;
+               startup-delay-us = <70000>;
+               enable-active-high;
+               regulator-boot-on;
+       };
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
new file mode 100644 (file)
index 0000000..5b7a408
--- /dev/null
@@ -0,0 +1,54 @@
+Voltage/Current Regulators
+
+Optional properties:
+- regulator-name: A string used as a descriptive name for regulator outputs
+- regulator-min-microvolt: smallest voltage consumers may set
+- regulator-max-microvolt: largest voltage consumers may set
+- regulator-microvolt-offset: Offset applied to voltages to compensate for voltage drops
+- regulator-min-microamp: smallest current consumers may set
+- regulator-max-microamp: largest current consumers may set
+- regulator-always-on: boolean, regulator should never be disabled
+- regulator-boot-on: bootloader/firmware enabled regulator
+- <name>-supply: phandle to the parent supply/regulator node
+
+Example:
+
+       xyzreg: regulator@0 {
+               regulator-min-microvolt = <1000000>;
+               regulator-max-microvolt = <2500000>;
+               regulator-always-on;
+               vin-supply = <&vin>;
+       };
+
+Regulator Consumers:
+Consumer nodes can reference one or more of its supplies/
+regulators using the below bindings.
+
+- <name>-supply: phandle to the regulator node
+
+These are the same bindings that a regulator in the above
+example used to reference its own supply, in which case
+it's just seen as a special case of a regulator being a
+consumer itself.
+
+Example of a consumer device node (mmc) referencing two
+regulators (twl_reg1 and twl_reg2),
+
+       twl_reg1: regulator@0 {
+               ...
+               ...
+               ...
+       };
+
+       twl_reg2: regulator@1 {
+               ...
+               ...
+               ...
+       };
+
+       mmc: mmc@0x0 {
+               ...
+               ...
+               vmmc-supply = <&twl_reg1>;
+               vmmcaux-supply = <&twl_reg2>;
+       };
diff --git a/Documentation/devicetree/bindings/rtc/twl-rtc.txt b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
new file mode 100644 (file)
index 0000000..596e0c9
--- /dev/null
@@ -0,0 +1,12 @@
+* TI twl RTC
+
+The TWL family (twl4030/6030) contains a RTC.
+
+Required properties:
+- compatible : Should be twl4030-rtc
+
+Examples:
+
+rtc@0 {
+    compatible = "ti,twl4030-rtc";
+};
index 284b44259750bd570df63926aef019728f978043..5575759b84eedccead799d851499ec69c5da3d8d 100644 (file)
@@ -350,15 +350,6 @@ Who:       anybody or Florian Mickler <florian@mickler.org>
 
 ----------------------------
 
-What:  KVM paravirt mmu host support
-When:  January 2011
-Why:   The paravirt mmu host support is slower than non-paravirt mmu, both
-       on newer and older hardware.  It is already not exposed to the guest,
-       and kept only for live migration purposes.
-Who:   Avi Kivity <avi@redhat.com>
-
-----------------------------
-
 What:  iwlwifi 50XX module parameters
 When:  3.0
 Why:   The "..50" modules parameters were used to configure 5000 series and
index 4917cf24a5e0885518cf06a12e53d4057f5c91fa..10ec4639f1522dad34d66eea61443750435fe3ae 100644 (file)
@@ -581,6 +581,13 @@ Table of Ext4 specific ioctls
                              behaviour may change in the future as it is
                              not necessary and has been done this way only
                              for sake of simplicity.
+
+ EXT4_IOC_RESIZE_FS          Resize the filesystem to a new size.  The number
+                             of blocks of resized filesystem is passed in via
+                             64 bit integer argument.  The kernel allocates
+                             bitmaps and inode table, the userspace tool thus
+                             just passes the new number of blocks.
+
 ..............................................................................
 
 References
index 0ec91f03422e5befe8dd7f69d22dec4a22250f69..12fee132fbe2e37da5a7441cb281120d367099a2 100644 (file)
@@ -41,6 +41,8 @@ Table of Contents
   3.5  /proc/<pid>/mountinfo - Information about mounts
   3.6  /proc/<pid>/comm  & /proc/<pid>/task/<tid>/comm
 
+  4    Configuring procfs
+  4.1  Mount options
 
 ------------------------------------------------------------------------------
 Preface
@@ -1542,3 +1544,40 @@ a task to set its own or one of its thread siblings comm value. The comm value
 is limited in size compared to the cmdline value, so writing anything longer
 then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
 comm value.
+
+
+------------------------------------------------------------------------------
+Configuring procfs
+------------------------------------------------------------------------------
+
+4.1    Mount options
+---------------------
+
+The following mount options are supported:
+
+       hidepid=        Set /proc/<pid>/ access mode.
+       gid=            Set the group authorized to learn processes information.
+
+hidepid=0 means classic mode - everybody may access all /proc/<pid>/ directories
+(default).
+
+hidepid=1 means users may not access any /proc/<pid>/ directories but their
+own.  Sensitive files like cmdline, sched*, status are now protected against
+other users.  This makes it impossible to learn whether any user runs a
+specific program (given the program doesn't reveal itself by its behaviour).
+As an additional bonus, as /proc/<pid>/cmdline is inaccessible to other users,
+poorly written programs passing sensitive information via program arguments are
+now protected against local eavesdroppers.
+
+hidepid=2 means hidepid=1 plus all /proc/<pid>/ will be fully invisible to other
+users.  It doesn't mean that it hides whether a process with a specific
+pid value exists (it can be learned by other means, e.g. by "kill -0 $PID"),
+but it hides process' uid and gid, which may be learned by stat()'ing
+/proc/<pid>/ otherwise.  It greatly complicates an intruder's task of gathering
+information about running processes, whether some daemon runs with elevated
+privileges, whether other user runs some sensitive program, whether other users
+run any program at all, etc.
+
+gid= defines a group authorized to learn processes information otherwise
+prohibited by hidepid=.  If you use some daemon like identd which needs to learn
+information about processes, just add identd to this group.
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
new file mode 100644 (file)
index 0000000..f274c28
--- /dev/null
@@ -0,0 +1,188 @@
+ALPS Touchpad Protocol
+----------------------
+
+Introduction
+------------
+
+Currently the ALPS touchpad driver supports four protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
+protocol versions is contained in the following sections.
+
+Detection
+---------
+
+All ALPS touchpads should respond to the "E6 report" command sequence:
+E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
+00-00-64.
+
+If the E6 report is successful, the touchpad model is identified using the "E7
+report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
+matched against known models in the alps_model_data_array.
+
+With protocol versions 3 and 4, the E7 report model signature is always
+73-02-64. To differentiate between these versions, the response from the
+"Enter Command Mode" sequence must be inspected as described below.
+
+Command Mode
+------------
+
+Protocol versions 3 and 4 have a command mode that is used to read and write
+one-byte device registers in a 16-bit address space. The command sequence
+EC-EC-EC-E9 places the device in command mode, and the device will respond
+with 88-07 followed by a third byte. This third byte can be used to determine
+whether the devices uses the version 3 or 4 protocol.
+
+To exit command mode, PSMOUSE_CMD_SETSTREAM (EA) is sent to the touchpad.
+
+While in command mode, register addresses can be set by first sending a
+specific command, either EC for v3 devices or F5 for v4 devices. Then the
+address is sent one nibble at a time, where each nibble is encoded as a
+command with optional data. This encoding differs slightly between the v3 and
+v4 protocols.
+
+Once an address has been set, the addressed register can be read by sending
+PSMOUSE_CMD_GETINFO (E9). The first two bytes of the response contains the
+address of the register being read, and the third contains the value of the
+register. Registers are written by writing the value one nibble at a time
+using the same encoding used for addresses.
+
+Packet Format
+-------------
+
+In the following tables, the following notation is used.
+
+ CAPITALS = stick, minuscules = touchpad
+
+?'s can have different meanings on different models, such as wheel rotation,
+extra buttons, stick buttons on a dualpoint, etc.
+
+PS/2 packet format
+------------------
+
+ byte 0:  0    0 YSGN XSGN    1    M    R    L
+ byte 1: X7   X6   X5   X4   X3   X2   X1   X0
+ byte 2: Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+
+Note that the device never signals overflow condition.
+
+ALPS Absolute Mode - Protocol Version 1
+--------------------------------------
+
+ byte 0:  1    0    0    0    1   x9   x8   x7
+ byte 1:  0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:  0    ?    ?    l    r    ?  fin  ges
+ byte 3:  0    ?    ?    ?    ?   y9   y8   y7
+ byte 4:  0   y6   y5   y4   y3   y2   y1   y0
+ byte 5:  0   z6   z5   z4   z3   z2   z1   z0
+
+ALPS Absolute Mode - Protocol Version 2
+---------------------------------------
+
+ byte 0:  1    ?    ?    ?    1    ?    ?    ?
+ byte 1:  0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:  0  x10   x9   x8   x7    ?  fin  ges
+ byte 3:  0   y9   y8   y7    1    M    R    L
+ byte 4:  0   y6   y5   y4   y3   y2   y1   y0
+ byte 5:  0   z6   z5   z4   z3   z2   z1   z0
+
+Dualpoint device -- interleaved packet format
+---------------------------------------------
+
+ byte 0:    1    1    0    0    1    1    1    1
+ byte 1:    0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:    0  x10   x9   x8   x7    0  fin  ges
+ byte 3:    0    0 YSGN XSGN    1    1    1    1
+ byte 4:   X7   X6   X5   X4   X3   X2   X1   X0
+ byte 5:   Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 6:    0   y9   y8   y7    1    m    r    l
+ byte 7:    0   y6   y5   y4   y3   y2   y1   y0
+ byte 8:    0   z6   z5   z4   z3   z2   z1   z0
+
+ALPS Absolute Mode - Protocol Version 3
+---------------------------------------
+
+ALPS protocol version 3 has three different packet formats. The first two are
+associated with touchpad events, and the third is associated with trackstick
+events.
+
+The first type is the touchpad position packet.
+
+ byte 0:    1    ?   x1   x0    1    1    1    1
+ byte 1:    0  x10   x9   x8   x7   x6   x5   x4
+ byte 2:    0  y10   y9   y8   y7   y6   y5   y4
+ byte 3:    0    M    R    L    1    m    r    l
+ byte 4:    0   mt   x3   x2   y3   y2   y1   y0
+ byte 5:    0   z6   z5   z4   z3   z2   z1   z0
+
+Note that for some devices the trackstick buttons are reported in this packet,
+and on others it is reported in the trackstick packets.
+
+The second packet type contains bitmaps representing the x and y axes. In the
+bitmaps a given bit is set if there is a finger covering that position on the
+given axis. Thus the bitmap packet can be used for low-resolution multi-touch
+data, although finger tracking is not possible.  This packet also encodes the
+number of contacts (f1 and f0 in the table below).
+
+ byte 0:    1    1   x1   x0    1    1    1    1
+ byte 1:    0   x8   x7   x6   x5   x4   x3   x2
+ byte 2:    0   y7   y6   y5   y4   y3   y2   y1
+ byte 3:    0  y10   y9   y8    1    1    1    1
+ byte 4:    0  x14  x13  x12  x11  x10   x9   y0
+ byte 5:    0    1    ?    ?    ?    ?   f1   f0
+
+This packet only appears after a position packet with the mt bit set, and
+usually only appears when there are two or more contacts (although
+occasionally it's seen with only a single contact).
+
+The final v3 packet type is the trackstick packet.
+
+ byte 0:    1    1   x7   y7    1    1    1    1
+ byte 1:    0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:    0   y6   y5   y4   y3   y2   y1   y0
+ byte 3:    0    1    0    0    1    0    0    0
+ byte 4:    0   z4   z3   z2   z1   z0    ?    ?
+ byte 5:    0    0    1    1    1    1    1    1
+
+ALPS Absolute Mode - Protocol Version 4
+---------------------------------------
+
+Protocol version 4 has an 8-byte packet format.
+
+ byte 0:    1    ?   x1   x0    1    1    1    1
+ byte 1:    0  x10   x9   x8   x7   x6   x5   x4
+ byte 2:    0  y10   y9   y8   y7   y6   y5   y4
+ byte 3:    0    1   x3   x2   y3   y2   y1   y0
+ byte 4:    0    ?    ?    ?    1    ?    r    l
+ byte 5:    0   z6   z5   z4   z3   z2   z1   z0
+ byte 6:    bitmap data (described below)
+ byte 7:    bitmap data (described below)
+
+The last two bytes represent a partial bitmap packet, with 3 full packets
+required to construct a complete bitmap packet.  Once assembled, the 6-byte
+bitmap packet has the following format:
+
+ byte 0:    0    1   x7   x6   x5   x4   x3   x2
+ byte 1:    0   x1   x0   y4   y3   y2   y1   y0
+ byte 2:    0    0    ?  x14  x13  x12  x11  x10
+ byte 3:    0   x9   x8   y9   y8   y7   y6   y5
+ byte 4:    0    0    0    0    0    0    0    0
+ byte 5:    0    0    0    0    0    0    0  y10
+
+There are several things worth noting here.
+
+ 1) In the bitmap data, bit 6 of byte 0 serves as a sync byte to
+    identify the first fragment of a bitmap packet.
+
+ 2) The bitmaps represent the same data as in the v3 bitmap packets, although
+    the packet layout is different.
+
+ 3) There doesn't seem to be a count of the contact points anywhere in the v4
+    protocol packets. Deriving a count of contact points must be done by
+    analyzing the bitmaps.
+
+ 4) There is a 3 to 1 ratio of position packets to bitmap packets. Therefore
+    MT position can only be updated for every third ST position update, and
+    the count of contact points can only be updated every third packet as
+    well.
+
+So far no v4 devices with tracksticks have been encountered.
diff --git a/Documentation/input/gpio-tilt.txt b/Documentation/input/gpio-tilt.txt
new file mode 100644 (file)
index 0000000..06d60c3
--- /dev/null
@@ -0,0 +1,103 @@
+Driver for tilt-switches connected via GPIOs
+============================================
+
+Generic driver to read data from tilt switches connected via gpios.
+Orientation can be provided by one or more than one tilt switches,
+i.e. each tilt switch providing one axis, and the number of axes
+is also not limited.
+
+
+Data structures:
+----------------
+
+The array of struct gpio in the gpios field is used to list the gpios
+that represent the current tilt state.
+
+The array of struct gpio_tilt_axis describes the axes that are reported
+to the input system. The values set therein are used for the
+input_set_abs_params calls needed to init the axes.
+
+The array of struct gpio_tilt_state maps gpio states to the corresponding
+values to report. The gpio state is represented as a bitfield where the
+bit-index corresponds to the index of the gpio in the struct gpio array.
+In the same manner the values stored in the axes array correspond to
+the elements of the gpio_tilt_axis-array.
+
+
+Example:
+--------
+
+Example configuration for a single TS1003 tilt switch that rotates around
+one axis in 4 steps and emits the current tilt via two GPIOs.
+
+static int sg060_tilt_enable(struct device *dev) {
+       /* code to enable the sensors */
+}
+
+static void sg060_tilt_disable(struct device *dev) {
+       /* code to disable the sensors */
+}
+
+static struct gpio sg060_tilt_gpios[] = {
+       { SG060_TILT_GPIO_SENSOR1, GPIOF_IN, "tilt_sensor1" },
+       { SG060_TILT_GPIO_SENSOR2, GPIOF_IN, "tilt_sensor2" },
+};
+
+static struct gpio_tilt_state sg060_tilt_states[] = {
+       {
+               .gpios = (0 << 1) | (0 << 0),
+               .axes = (int[]) {
+                       0,
+               },
+       }, {
+               .gpios = (0 << 1) | (1 << 0),
+               .axes = (int[]) {
+                       1, /* 90 degrees */
+               },
+       }, {
+               .gpios = (1 << 1) | (1 << 0),
+               .axes = (int[]) {
+                       2, /* 180 degrees */
+               },
+       }, {
+               .gpios = (1 << 1) | (0 << 0),
+               .axes = (int[]) {
+                       3, /* 270 degrees */
+               },
+       },
+};
+
+static struct gpio_tilt_axis sg060_tilt_axes[] = {
+       {
+               .axis = ABS_RY,
+               .min = 0,
+               .max = 3,
+               .fuzz = 0,
+               .flat = 0,
+       },
+};
+
+static struct gpio_tilt_platform_data sg060_tilt_pdata = {
+       .gpios = sg060_tilt_gpios,
+       .nr_gpios = ARRAY_SIZE(sg060_tilt_gpios),
+
+       .axes = sg060_tilt_axes,
+       .nr_axes = ARRAY_SIZE(sg060_tilt_axes),
+
+       .states = sg060_tilt_states,
+       .nr_states = ARRAY_SIZE(sg060_tilt_states),
+
+       .debounce_interval = 100,
+
+       .poll_interval = 1000,
+       .enable = sg060_tilt_enable,
+       .disable = sg060_tilt_disable,
+};
+
+static struct platform_device sg060_device_tilt = {
+       .name = "gpio-tilt-polled",
+       .id = -1,
+       .dev = {
+               .platform_data = &sg060_tilt_pdata,
+       },
+};
index b2ef125b71f80abfd064b59d407d64a9d7fac7fe..89251e2a3eba101fddf89e7d4da3a2f1b8fb5ba8 100644 (file)
@@ -1,5 +1,5 @@
-Copyright (C) 2002-2010 Sentelic Corporation.
-Last update: Jan-13-2010
+Copyright (C) 2002-2011 Sentelic Corporation.
+Last update: Dec-07-2011
 
 ==============================================================================
 * Finger Sensing Pad Intellimouse Mode(scrolling wheel, 4th and 5th buttons)
@@ -140,6 +140,7 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordination packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
                 When both fingers are up, the last two reports have zero valid
                 bit.
@@ -164,6 +165,7 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordinates packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
                 When both fingers are up, the last two reports have zero valid
                 bit.
@@ -188,6 +190,7 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordinates packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => 1
         Bit4 => when in absolute coordinates mode (valid when EN_PKT_GO is 1):
                 0: left button is generated by the on-pad command
@@ -205,7 +208,7 @@ Byte 4: Bit7 => scroll right button
         Bit6 => scroll left button
         Bit5 => scroll down button
         Bit4 => scroll up button
-            * Note that if gesture and additional buttoni (Bit4~Bit7)
+            * Note that if gesture and additional button (Bit4~Bit7)
               happen at the same time, the button information will not
               be sent.
         Bit3~Bit0 => Reserved
@@ -227,6 +230,7 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordinates packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
                 When both fingers are up, the last two reports have zero valid
                 bit.
@@ -253,6 +257,7 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordination packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
                 When both fingers are up, the last two reports have zero valid
                 bit.
@@ -279,8 +284,9 @@ BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
 Byte 1: Bit7~Bit6 => 00, Normal data packet
                   => 01, Absolute coordination packet
                   => 10, Notify packet
+                  => 11, Normal data packet with on-pad click
         Bit5 => 1
-        Bit4 => when in absolute coordinate mode (valid when EN_PKT_GO is 1):
+        Bit4 => when in absolute coordinates mode (valid when EN_PKT_GO is 1):
                 0: left button is generated by the on-pad command
                 1: left button is generated by the external button
         Bit3 => 1
@@ -306,6 +312,110 @@ Sample sequence of Multi-finger, Multi-coordinate mode:
        notify packet (valid bit == 1), abs pkt 1, abs pkt 2, abs pkt 1,
        abs pkt 2, ..., notify packet (valid bit == 0)
 
+==============================================================================
+* Absolute position for STL3888-Cx and STL3888-Dx.
+==============================================================================
+Single Finger, Absolute Coordinate Mode (SFAC)
+   Bit 7 6 5 4 3 2 1 0       7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0
+BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+  1   |0|1|0|P|1|M|R|L|  2  |X|X|X|X|X|X|X|X|  3 |Y|Y|Y|Y|Y|Y|Y|Y|  4 |r|l|B|F|X|X|Y|Y|
+      |---------------|     |---------------|    |---------------|    |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+                  => 01, Absolute coordinates packet
+                  => 10, Notify packet
+       Bit5 => Coordinate mode(always 0 in SFAC mode):
+               0: single-finger absolute coordinates (SFAC) mode
+               1: multi-finger, multiple coordinates (MFMC) mode
+       Bit4 => 0: The LEFT button is generated by on-pad command (OPC)
+               1: The LEFT button is generated by external button
+               Default is 1 even if the LEFT button is not pressed.
+       Bit3 => Always 1, as specified by PS/2 protocol.
+       Bit2 => Middle Button, 1 is pressed, 0 is not pressed.
+       Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+       Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: X coordinate (xpos[9:2])
+Byte 3: Y coordinate (ypos[9:2])
+Byte 4: Bit1~Bit0 => Y coordinate (ypos[1:0])
+       Bit3~Bit2 => X coordinate (xpos[1:0])
+       Bit4 => 4th mouse button(forward one page)
+       Bit5 => 5th mouse button(backward one page)
+       Bit6 => scroll left button
+       Bit7 => scroll right button
+
+Multi Finger, Multiple Coordinates Mode (MFMC):
+   Bit 7 6 5 4 3 2 1 0       7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0
+BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+  1   |0|1|1|P|1|F|R|L|  2  |X|X|X|X|X|X|X|X|  3 |Y|Y|Y|Y|Y|Y|Y|Y|  4 |r|l|B|F|X|X|Y|Y|
+      |---------------|     |---------------|    |---------------|    |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+                  => 01, Absolute coordination packet
+                  => 10, Notify packet
+       Bit5 => Coordinate mode (always 1 in MFMC mode):
+               0: single-finger absolute coordinates (SFAC) mode
+               1: multi-finger, multiple coordinates (MFMC) mode
+       Bit4 => 0: The LEFT button is generated by on-pad command (OPC)
+               1: The LEFT button is generated by external button
+               Default is 1 even if the LEFT button is not pressed.
+       Bit3 => Always 1, as specified by PS/2 protocol.
+       Bit2 => Finger index, 0 is the first finger, 1 is the second finger.
+               If bit 1 and 0 are all 1 and bit 4 is 0, the middle external
+               button is pressed.
+       Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+       Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: X coordinate (xpos[9:2])
+Byte 3: Y coordinate (ypos[9:2])
+Byte 4: Bit1~Bit0 => Y coordinate (ypos[1:0])
+       Bit3~Bit2 => X coordinate (xpos[1:0])
+       Bit4 => 4th mouse button(forward one page)
+       Bit5 => 5th mouse button(backward one page)
+       Bit6 => scroll left button
+       Bit7 => scroll right button
+
+  When one of the two fingers is up, the device will output four consecutive
+MFMC#0 report packets with zero X and Y to represent 1st finger is up or
+four consecutive MFMC#1 report packets with zero X and Y to represent that
+the 2nd finger is up.  On the other hand, if both fingers are up, the device
+will output four consecutive single-finger, absolute coordinate(SFAC) packets
+with zero X and Y.
+
+Notify Packet for STL3888-Cx/Dx
+   Bit 7 6 5 4 3 2 1 0       7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0      7 6 5 4 3 2 1 0
+BYTE  |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+  1   |1|0|0|P|1|M|R|L|  2  |C|C|C|C|C|C|C|C|  3 |0|0|F|F|0|0|0|i|  4 |r|l|u|d|0|0|0|0|
+      |---------------|     |---------------|    |---------------|    |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+                  => 01, Absolute coordinates packet
+                  => 10, Notify packet
+       Bit5 => Always 0
+       Bit4 => 0: The LEFT button is generated by on-pad command(OPC)
+               1: The LEFT button is generated by external button
+               Default is 1 even if the LEFT button is not pressed.
+       Bit3 => 1
+       Bit2 => Middle Button, 1 is pressed, 0 is not pressed.
+       Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+       Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: Message type:
+       0xba => gesture information
+       0xc0 => one finger hold-rotating gesture
+Byte 3: The first parameter for the received message:
+       0xba => gesture ID (refer to the 'Gesture ID' section)
+       0xc0 => region ID
+Byte 4: The second parameter for the received message:
+       0xba => N/A
+       0xc0 => finger up/down information
+
+Sample sequence of Multi-finger, Multi-coordinates mode:
+
+       notify packet (valid bit == 1), MFMC packet 1 (byte 1, bit 2 == 0),
+       MFMC packet 2 (byte 1, bit 2 == 1), MFMC packet 1, MFMC packet 2,
+       ..., notify packet (valid bit == 0)
+
+       That is, when the device is in MFMC mode, the host will receive
+       interleaved absolute coordinate packets for each finger.
+
 ==============================================================================
 * FSP Enable/Disable packet
 ==============================================================================
@@ -348,9 +458,10 @@ http://www.computer-engineering.org/ps2mouse/
 ==============================================================================
 1. Identify FSP by reading device ID(0x00) and version(0x01) register
 
-2. Determine number of buttons by reading status2 (0x0b) register
+2a. For FSP version < STL3888 Cx, determine number of buttons by reading
+    the 'test mode status' (0x20) register:
 
-       buttons = reg[0x0b] & 0x30
+       buttons = reg[0x20] & 0x30
 
        if buttons == 0x30 or buttons == 0x20:
                # two/four buttons
@@ -365,6 +476,10 @@ http://www.computer-engineering.org/ps2mouse/
                Refer to 'Finger Sensing Pad PS/2 Mouse Intellimouse'
                section A for packet parsing detail
 
+2b. For FSP version >= STL3888 Cx:
+       Refer to 'Finger Sensing Pad PS/2 Mouse Intellimouse'
+       section A for packet parsing detail (ignore byte 4, bit 4 ~ 7)
+
 ==============================================================================
 * Programming Sequence for Register Reading/Writing
 ==============================================================================
@@ -374,7 +489,7 @@ Register inversion requirement:
   Following values needed to be inverted(the '~' operator in C) before being
 sent to FSP:
 
-       0xe9, 0xee, 0xf2 and 0xff.
+       0xe8, 0xe9, 0xee, 0xf2, 0xf3 and 0xff.
 
 Register swapping requirement:
 
@@ -415,7 +530,18 @@ Register reading sequence:
 
        8. send 0xe9(status request) PS/2 command to FSP;
 
-       9. the response read from FSP should be the requested register value.
+       9. the 4th byte of the response read from FSP should be the
+       requested register value(?? indicates don't care byte):
+
+               host: 0xe9
+               3888: 0xfa (??) (??) (val)
+
+       * Note that since the Cx release, the hardware will return 1's
+       complement of the register value at the 3rd byte of status request
+       result:
+
+               host: 0xe9
+               3888: 0xfa (??) (~val) (val)
 
 Register writing sequence:
 
@@ -465,71 +591,194 @@ Register writing sequence:
 
        9. the register writing sequence is completed.
 
+       * Note that since the Cx release, the hardware will return 1's
+       complement of the register value at the 3rd byte of status request
+       result. Host can optionally send another 0xe9 (status request) PS/2
+       command to FSP at the end of register writing to verify that the
+       register writing operation is successful (?? indicates don't care
+       byte):
+
+               host: 0xe9
+               3888: 0xfa (??) (~val) (val)
+
+==============================================================================
+* Programming Sequence for Page Register Reading/Writing
+==============================================================================
+
+  In order to overcome the limitation of maximum number of registers
+supported, the hardware separates register into different groups called
+'pages.' Each page is able to include up to 255 registers.
+
+  The default page after power up is 0x82; therefore, if one has to get
+access to register 0x8301, one has to use following sequence to switch
+to page 0x83, then start reading/writing from/to offset 0x01 by using
+the register read/write sequence described in previous section.
+
+Page register reading sequence:
+
+       1. send 0xf3 PS/2 command to FSP;
+
+       2. send 0x66 PS/2 command to FSP;
+
+       3. send 0x88 PS/2 command to FSP;
+
+       4. send 0xf3 PS/2 command to FSP;
+
+       5. send 0x83 PS/2 command to FSP;
+
+       6. send 0x88 PS/2 command to FSP;
+
+       7. send 0xe9(status request) PS/2 command to FSP;
+
+       8. the response read from FSP should be the requested page value.
+
+Page register writing sequence:
+
+       1. send 0xf3 PS/2 command to FSP;
+
+       2. send 0x38 PS/2 command to FSP;
+
+       3. send 0x88 PS/2 command to FSP;
+
+       4. send 0xf3 PS/2 command to FSP;
+
+       5. if the page address being written is not required to be
+       inverted(refer to the 'Register inversion requirement' section),
+       goto step 6
+
+       5a. send 0x47 PS/2 command to FSP;
+
+       5b. send the inverted page address to FSP and goto step 9;
+
+       6. if the page address being written is not required to be
+       swapped(refer to the 'Register swapping requirement' section),
+       goto step 7
+
+       6a. send 0x44 PS/2 command to FSP;
+
+       6b. send the swapped page address to FSP and goto step 9;
+
+       7. send 0x33 PS/2 command to FSP;
+
+       8. send the page address to FSP;
+
+       9. the page register writing sequence is completed.
+
+==============================================================================
+* Gesture ID
+==============================================================================
+
+  Unlike other devices which send multiple fingers' coordinates to host,
+FSP processes multiple fingers' coordinates internally and converts them
+into an 8-bit integer, namely 'Gesture ID.'  Following is a list of
+supported gesture IDs:
+
+       ID      Description
+       0x86    2 finger straight up
+       0x82    2 finger straight down
+       0x80    2 finger straight right
+       0x84    2 finger straight left
+       0x8f    2 finger zoom in
+       0x8b    2 finger zoom out
+       0xc0    2 finger curve, counter clockwise
+       0xc4    2 finger curve, clockwise
+       0x2e    3 finger straight up
+       0x2a    3 finger straight down
+       0x28    3 finger straight right
+       0x2c    3 finger straight left
+       0x38    palm
+
 ==============================================================================
 * Register Listing
 ==============================================================================
 
+  Registers are represented in 16 bits values. The higher 8 bits represent
+the page address and the lower 8 bits represent the relative offset within
+that particular page.  Refer to the 'Programming Sequence for Page Register
+Reading/Writing' section for instructions on how to change current page
+address.
+
 offset width           default r/w     name
-0x00   bit7~bit0       0x01    RO      device ID
+0x8200 bit7~bit0       0x01    RO      device ID
 
-0x01   bit7~bit0       0xc0    RW      version ID
+0x8201 bit7~bit0               RW      version ID
+                                       0xc1: STL3888 Ax
+                                       0xd0 ~ 0xd2: STL3888 Bx
+                                       0xe0 ~ 0xe1: STL3888 Cx
+                                       0xe2 ~ 0xe3: STL3888 Dx
 
-0x02   bit7~bit0       0x01    RO      vendor ID
+0x8202 bit7~bit0       0x01    RO      vendor ID
 
-0x03   bit7~bit0       0x01    RO      product ID
+0x8203 bit7~bit0       0x01    RO      product ID
 
-0x04   bit3~bit0       0x01    RW      revision ID
+0x8204 bit3~bit0       0x01    RW      revision ID
 
-0x0b                           RO      test mode status 1
-       bit3            1       RO      0: rotate 180 degree, 1: no rotation
+0x820b                                 test mode status 1
+       bit3            1       RO      0: rotate 180 degree
+                                       1: no rotation
+                                       *only supported by H/W prior to Cx
 
-       bit5~bit4               RO      number of buttons
-                       11 => 2, lbtn/rbtn
-                       10 => 4, lbtn/rbtn/scru/scrd
-                       01 => 6, lbtn/rbtn/scru/scrd/scrl/scrr
-                       00 => 6, lbtn/rbtn/scru/scrd/fbtn/bbtn
+0x820f                                 register file page control
+       bit2            0       RW      1: rotate 180 degree
+                                       0: no rotation
+                                       *supported since Cx
 
-0x0f                           RW      register file page control
        bit0            0       RW      1 to enable page 1 register files
+                                       *only supported by H/W prior to Cx
 
-0x10                           RW      system control 1
+0x8210                         RW      system control 1
        bit0            1       RW      Reserved, must be 1
        bit1            0       RW      Reserved, must be 0
-       bit4            1       RW      Reserved, must be 0
-       bit5            0       RW      register clock gating enable
+       bit4            0       RW      Reserved, must be 0
+       bit5            1       RW      register clock gating enable
                                        0: read only, 1: read/write enable
        (Note that following registers does not require clock gating being
        enabled prior to write: 05 06 07 08 09 0c 0f 10 11 12 16 17 18 23 2e
        40 41 42 43.  In addition to that, this bit must be 1 when gesture
        mode is enabled)
 
-0x31                           RW      on-pad command detection
+0x8220                                 test mode status
+       bit5~bit4               RO      number of buttons
+                                       11 => 2, lbtn/rbtn
+                                       10 => 4, lbtn/rbtn/scru/scrd
+                                       01 => 6, lbtn/rbtn/scru/scrd/scrl/scrr
+                                       00 => 6, lbtn/rbtn/scru/scrd/fbtn/bbtn
+                                       *only supported by H/W prior to Cx
+
+0x8231                         RW      on-pad command detection
        bit7            0       RW      on-pad command left button down tag
                                        enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
-0x34                           RW      on-pad command control 5
+0x8234                         RW      on-pad command control 5
        bit4~bit0       0x05    RW      XLO in 0s/4/1, so 03h = 0010.1b = 2.5
        (Note that position unit is in 0.5 scanline)
+                                       *only supported by H/W prior to Cx
 
        bit7            0       RW      on-pad tap zone enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
-0x35                           RW      on-pad command control 6
+0x8235                         RW      on-pad command control 6
        bit4~bit0       0x1d    RW      XHI in 0s/4/1, so 19h = 1100.1b = 12.5
        (Note that position unit is in 0.5 scanline)
+                                       *only supported by H/W prior to Cx
 
-0x36                           RW      on-pad command control 7
+0x8236                         RW      on-pad command control 7
        bit4~bit0       0x04    RW      YLO in 0s/4/1, so 03h = 0010.1b = 2.5
        (Note that position unit is in 0.5 scanline)
+                                       *only supported by H/W prior to Cx
 
-0x37                           RW      on-pad command control 8
+0x8237                         RW      on-pad command control 8
        bit4~bit0       0x13    RW      YHI in 0s/4/1, so 11h = 1000.1b = 8.5
        (Note that position unit is in 0.5 scanline)
+                                       *only supported by H/W prior to Cx
 
-0x40                           RW      system control 5
+0x8240                         RW      system control 5
        bit1            0       RW      FSP Intellimouse mode enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
        bit2            0       RW      movement + abs. coordinate mode enable
                                        0: disable, 1: enable
@@ -537,6 +786,7 @@ offset      width           default r/w     name
        bit 1 is not set. However, the format is different from that of bit 1.
        In addition, when bit 1 and bit 2 are set at the same time, bit 2 will
        override bit 1.)
+                                       *only supported by H/W prior to Cx
 
        bit3            0       RW      abs. coordinate only mode enable
                                        0: disable, 1: enable
@@ -544,9 +794,11 @@ offset     width           default r/w     name
        bit 1 is not set. However, the format is different from that of bit 1.
        In addition, when bit 1, bit 2 and bit 3 are set at the same time,
        bit 3 will override bit 1 and 2.)
+                                       *only supported by H/W prior to Cx
 
        bit5            0       RW      auto switch enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
        bit6            0       RW      G0 abs. + notify packet format enable
                                        0: disable, 1: enable
@@ -554,18 +806,68 @@ offset    width           default r/w     name
        bit 2 and 3.  That is, if any of those bit is 1, host will receive
        absolute coordinates; otherwise, host only receives packets with
        relative coordinate.)
+                                       *only supported by H/W prior to Cx
 
        bit7            0       RW      EN_PS2_F2: PS/2 gesture mode 2nd
                                        finger packet enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
-0x43                           RW      on-pad control
+0x8243                         RW      on-pad control
        bit0            0       RW      on-pad control enable
                                        0: disable, 1: enable
        (Note that if this bit is cleared, bit 3/5 will be ineffective)
+                                       *only supported by H/W prior to Cx
 
        bit3            0       RW      on-pad fix vertical scrolling enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
 
        bit5            0       RW      on-pad fix horizontal scrolling enable
                                        0: disable, 1: enable
+                                       *only supported by H/W prior to Cx
+
+0x8290                         RW      software control register 1
+       bit0            0       RW      absolute coordination mode
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+       bit1            0       RW      gesture ID output
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+       bit2            0       RW      two fingers' coordinates output
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+       bit3            0       RW      finger up one packet output
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+       bit4            0       RW      absolute coordination continuous mode
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+       bit6~bit5       00      RW      gesture group selection
+                                       00: basic
+                                       01: suite
+                                       10: suite pro
+                                       11: advanced
+                                       *supported since Cx
+
+       bit7            0       RW      Bx packet output compatible mode
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+
+0x833d                         RW      on-pad command control 1
+       bit7            1       RW      on-pad command detection enable
+                                       0: disable, 1: enable
+                                       *supported since Cx
+
+0x833e                         RW      on-pad command detection
+       bit7            0       RW      on-pad command left button down tag
+                                       enable. Works only in H/W based PS/2
+                                       data packet mode.
+                                       0: disable, 1: enable
+                                       *supported since Cx
index 7b2e5c5eefa60269e0311d2685bd28b10c1e22cc..c92b1532f05adadf9f8aaeac671434080b444424 100644 (file)
@@ -329,6 +329,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                    is a lot of faster
                        off       - do not initialize any AMD IOMMU found in
                                    the system
+                       force_isolation - Force device isolation for all
+                                         devices. The IOMMU driver is not
+                                         allowed anymore to lift isolation
+                                         requirements as needed. This option
+                                         does not override iommu=pt
 
        amijoy.map=     [HW,JOY] Amiga joystick support
                        Map of devices attached to JOY0DAT and JOY1DAT
@@ -623,6 +628,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        no_debug_objects
                        [KNL] Disable object debugging
 
+       debug_guardpage_minorder=
+                       [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+                       parameter allows control of the order of pages that will
+                       be intentionally kept free (and hence protected) by the
+                       buddy allocator. Bigger values increase the probability
+                       of catching random memory corruption, but reduce the
+                       amount of memory for normal system use. The maximum
+                       possible value is MAX_ORDER/2.  Setting this parameter
+                       to 1 or 2 should be enough to identify most random
+                       memory corruption problems caused by bugs in kernel or
+                       driver code when a CPU writes to (or reads from) a
+                       random memory location. Note that there exists a class
+                       of memory corruption problems caused by buggy H/W or
+                       F/W or by drivers badly programming DMA (basically when
+                       memory is written at bus level and the CPU MMU is
+                       bypassed) which are not detectable by
+                       CONFIG_DEBUG_PAGEALLOC, hence this option will not help
+                       tracking down these problems.
+
        debugpat        [X86] Enable PAT debugging
 
        decnet.addr=    [HW,NET]
@@ -1059,7 +1083,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                nomerge
                forcesac
                soft
-               pt      [x86, IA-64]
+               pt              [x86, IA-64]
+               group_mf        [x86, IA-64]
+
 
        io7=            [HW] IO7 for Marvel based alpha systems
                        See comment before marvel_specify_io7 in
@@ -1178,9 +1204,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
                        Default is 0 (don't ignore, but inject #GP)
 
-       kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
-                       Default is 1 (enabled)
-
        kvm.mmu_audit=  [KVM] This is a R/W parameter which allows audit
                        KVM MMU at runtime.
                        Default is 0 (off)
@@ -1630,12 +1653,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The default is to return 64-bit inode numbers.
 
        nfs.nfs4_disable_idmapping=
-                       [NFSv4] When set, this option disables the NFSv4
-                       idmapper on the client, but only if the mount
-                       is using the 'sec=sys' security flavour. This may
-                       make migration from legacy NFSv2/v3 systems easier
-                       provided that the server has the appropriate support.
-                       The default is to always enable NFSv4 idmapping.
+                       [NFSv4] When set to the default of '1', this option
+                       ensures that both the RPC level authentication
+                       scheme and the NFS level operations agree to use
+                       numeric uids/gids if the mount is using the
+                       'sec=sys' security flavour. In effect it is
+                       disabling idmapping, which can make migration from
+                       legacy NFSv2/v3 systems to NFSv4 easier.
+                       Servers that do not support this mode of operation
+                       will be autodetected by the client, and it will fall
+                       back to using the idmapper.
+                       To turn off this behaviour, set the value to '0'.
 
        nmi_debug=      [KNL,AVR32,SH] Specify one or more actions to take
                        when a NMI is triggered.
index b04cb7d45a1632625a194883587b1300690a200c..6727b92bc2fb9db3f700d3056c1360e0297c09b5 100644 (file)
@@ -7,12 +7,9 @@ This subsystem deals with:
 
 - Multiplexing of pins, pads, fingers (etc) see below for details
 
-The intention is to also deal with:
-
-- Software-controlled biasing and driving mode specific pins, such as
-  pull-up/down, open drain etc, load capacitance configuration when controlled
-  by software, etc.
-
+- Configuration of pins, pads, fingers (etc), such as software-controlled
+  biasing and driving mode specific pins, such as pull-up/down, open drain,
+  load capacitance etc.
 
 Top-level interface
 ===================
@@ -32,7 +29,7 @@ Definition of PIN:
   be sparse - i.e. there may be gaps in the space with numbers where no
   pin exists.
 
-When a PIN CONTROLLER is instatiated, it will register a descriptor to the
+When a PIN CONTROLLER is instantiated, it will register a descriptor to the
 pin control framework, and this descriptor contains an array of pin descriptors
 describing the pins handled by this specific pin controller.
 
@@ -61,14 +58,14 @@ this in our driver:
 
 #include <linux/pinctrl/pinctrl.h>
 
-const struct pinctrl_pin_desc __refdata foo_pins[] = {
-      PINCTRL_PIN(0, "A1"),
-      PINCTRL_PIN(1, "A2"),
-      PINCTRL_PIN(2, "A3"),
+const struct pinctrl_pin_desc foo_pins[] = {
+      PINCTRL_PIN(0, "A8"),
+      PINCTRL_PIN(1, "B8"),
+      PINCTRL_PIN(2, "C8"),
       ...
-      PINCTRL_PIN(61, "H6"),
-      PINCTRL_PIN(62, "H7"),
-      PINCTRL_PIN(63, "H8"),
+      PINCTRL_PIN(61, "F1"),
+      PINCTRL_PIN(62, "G1"),
+      PINCTRL_PIN(63, "H1"),
 };
 
 static struct pinctrl_desc foo_desc = {
@@ -88,11 +85,16 @@ int __init foo_probe(void)
                pr_err("could not register foo pin driver\n");
 }
 
+To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
+selected drivers, you need to select them from your machine's Kconfig entry,
+since these are so tightly integrated with the machines they are used on.
+See arch/arm/mach-u300/Kconfig for an example.
+
 Pins usually have fancier names than this. You can find these in the datasheet
 for your chip. Notice that the core pinctrl.h file provides a fancy macro
 called PINCTRL_PIN() to create the struct entries. As you can see I enumerated
-the pins from 0 in the upper left corner to 63 in the lower right corner,
-this enumeration was arbitrarily chosen, in practice you need to think
+the pins from 0 in the upper left corner to 63 in the lower right corner.
+This enumeration was arbitrarily chosen, in practice you need to think
 through your numbering system so that it matches the layout of registers
 and such things in your driver, or the code may become complicated. You must
 also consider matching of offsets to the GPIO ranges that may be handled by
@@ -133,8 +135,8 @@ struct foo_group {
        const unsigned num_pins;
 };
 
-static unsigned int spi0_pins[] = { 0, 8, 16, 24 };
-static unsigned int i2c0_pins[] = { 24, 25 };
+static const unsigned int spi0_pins[] = { 0, 8, 16, 24 };
+static const unsigned int i2c0_pins[] = { 24, 25 };
 
 static const struct foo_group foo_groups[] = {
        {
@@ -193,6 +195,88 @@ structure, for example specific register ranges associated with each group
 and so on.
 
 
+Pin configuration
+=================
+
+Pins can sometimes be software-configured in various ways, mostly related
+to their electronic properties when used as inputs or outputs. For example you
+may be able to make an output pin high impedance, or "tristate" meaning it is
+effectively disconnected. You may be able to connect an input pin to VDD or GND
+using a certain resistor value - pull up and pull down - so that the pin has a
+stable value when nothing is driving the rail it is connected to, or when it's
+unconnected.
+
+For example, a platform may do this:
+
+ret = pin_config_set("foo-dev", "FOO_GPIO_PIN", PLATFORM_X_PULL_UP);
+
+To pull up a pin to VDD. The pin configuration driver implements callbacks for
+changing pin configuration in the pin controller ops like this:
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "platform_x_pindefs.h"
+
+static int foo_pin_config_get(struct pinctrl_dev *pctldev,
+                   unsigned offset,
+                   unsigned long *config)
+{
+       struct my_conftype conf;
+
+       ... Find setting for pin @ offset ...
+
+       *config = (unsigned long) conf;
+}
+
+static int foo_pin_config_set(struct pinctrl_dev *pctldev,
+                   unsigned offset,
+                   unsigned long config)
+{
+       struct my_conftype *conf = (struct my_conftype *) config;
+
+       switch (conf) {
+               case PLATFORM_X_PULL_UP:
+               ...
+               }
+       }
+}
+
+static int foo_pin_config_group_get (struct pinctrl_dev *pctldev,
+                   unsigned selector,
+                   unsigned long *config)
+{
+       ...
+}
+
+static int foo_pin_config_group_set (struct pinctrl_dev *pctldev,
+                   unsigned selector,
+                   unsigned long config)
+{
+       ...
+}
+
+static struct pinconf_ops foo_pconf_ops = {
+       .pin_config_get = foo_pin_config_get,
+       .pin_config_set = foo_pin_config_set,
+       .pin_config_group_get = foo_pin_config_group_get,
+       .pin_config_group_set = foo_pin_config_group_set,
+};
+
+/* Pin config operations are handled by some pin controller */
+static struct pinctrl_desc foo_desc = {
+       ...
+       .confops = &foo_pconf_ops,
+};
+
+Since some controllers have special logic for handling entire groups of pins
+they can exploit the special whole-group pin control function. The
+pin_config_group_set() callback is allowed to return the error code -EAGAIN,
+for groups it does not want to handle, or if it just wants to do some
+group-level handling and then fall through to iterate over all pins, in which
+case each individual pin will be treated by separate pin_config_set() calls as
+well.
+
+
 Interaction with the GPIO subsystem
 ===================================
 
@@ -214,19 +298,20 @@ static struct pinctrl_gpio_range gpio_range_a = {
        .name = "chip a",
        .id = 0,
        .base = 32,
+       .pin_base = 32,
        .npins = 16,
        .gc = &chip_a;
 };
 
-static struct pinctrl_gpio_range gpio_range_a = {
+static struct pinctrl_gpio_range gpio_range_b = {
        .name = "chip b",
        .id = 0,
        .base = 48,
+       .pin_base = 64,
        .npins = 8,
        .gc = &chip_b;
 };
 
-
 {
        struct pinctrl_dev *pctl;
        ...
@@ -235,42 +320,39 @@ static struct pinctrl_gpio_range gpio_range_a = {
 }
 
 So this complex system has one pin controller handling two different
-GPIO chips. Chip a has 16 pins and chip b has 8 pins. They are mapped in
-the global GPIO pin space at:
+GPIO chips. "chip a" has 16 pins and "chip b" has 8 pins. The "chip a" and
+"chip b" have different .pin_base, which means a start pin number of the
+GPIO range.
+
+The GPIO range of "chip a" starts from the GPIO base of 32 and actual
+pin range also starts from 32. However "chip b" has different starting
+offset for the GPIO range and pin range. The GPIO range of "chip b" starts
+from GPIO number 48, while the pin range of "chip b" starts from 64.
+
+We can convert a gpio number to actual pin number using this "pin_base".
+They are mapped in the global GPIO pin space at:
 
-chip a: [32 .. 47]
-chip b: [48 .. 55]
+chip a:
+ - GPIO range : [32 .. 47]
+ - pin range  : [32 .. 47]
+chip b:
+ - GPIO range : [48 .. 55]
+ - pin range  : [64 .. 71]
 
 When GPIO-specific functions in the pin control subsystem are called, these
-ranges will be used to look up the apropriate pin controller by inspecting
+ranges will be used to look up the appropriate pin controller by inspecting
 and matching the pin to the pin ranges across all controllers. When a
 pin controller handling the matching range is found, GPIO-specific functions
 will be called on that specific pin controller.
 
 For all functionalities dealing with pin biasing, pin muxing etc, the pin
 controller subsystem will subtract the range's .base offset from the passed
-in gpio pin number, and pass that on to the pin control driver, so the driver
-will get an offset into its handled number range. Further it is also passed
+in gpio number, and add the range's .pin_base offset to retrieve a pin number.
+After that, the subsystem passes it on to the pin control driver, so the driver
+will get a pin number into its handled number range. Further it is also passed
 the range ID value, so that the pin controller knows which range it should
 deal with.
 
-For example: if a user issues pinctrl_gpio_set_foo(50), the pin control
-subsystem will find that the second range on this pin controller matches,
-subtract the base 48 and call the
-pinctrl_driver_gpio_set_foo(pinctrl, range, 2) where the latter function has
-this signature:
-
-int pinctrl_driver_gpio_set_foo(struct pinctrl_dev *pctldev,
-    struct pinctrl_gpio_range *rangeid,
-    unsigned offset);
-
-Now the driver knows that we want to do some GPIO-specific operation on the
-second GPIO range handled by "chip b", at offset 2 in that specific range.
-
-(If the GPIO subsystem is ever refactored to use a local per-GPIO controller
-pin space, this mapping will need to be augmented accordingly.)
-
-
 PINMUX interfaces
 =================
 
@@ -438,7 +520,7 @@ you. Define enumerators only for the pins you can control if that makes sense.
 
 Assumptions:
 
-We assume that the number possible function maps to pin groups is limited by
+We assume that the number of possible function maps to pin groups is limited by
 the hardware. I.e. we assume that there is no system where any function can be
 mapped to any pin, like in a phone exchange. So the available pin groups for
 a certain function will be limited to a few choices (say up to eight or so),
@@ -585,7 +667,7 @@ int foo_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
 
 const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
 {
-       return myfuncs[selector].name;
+       return foo_functions[selector].name;
 }
 
 static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
@@ -600,16 +682,16 @@ static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
 int foo_enable(struct pinctrl_dev *pctldev, unsigned selector,
                unsigned group)
 {
-       u8 regbit = (1 << group);
+       u8 regbit = (1 << selector + group);
 
        writeb((readb(MUX)|regbit), MUX)
        return 0;
 }
 
-int foo_disable(struct pinctrl_dev *pctldev, unsigned selector,
+void foo_disable(struct pinctrl_dev *pctldev, unsigned selector,
                unsigned group)
 {
-       u8 regbit = (1 << group);
+       u8 regbit = (1 << selector + group);
 
        writeb((readb(MUX) & ~(regbit)), MUX)
        return 0;
@@ -647,6 +729,17 @@ All the above functions are mandatory to implement for a pinmux driver.
 Pinmux interaction with the GPIO subsystem
 ==========================================
 
+The public pinmux API contains two functions named pinmux_request_gpio()
+and pinmux_free_gpio(). These two functions shall *ONLY* be called from
+gpiolib-based drivers as part of their gpio_request() and
+gpio_free() semantics. Likewise the pinmux_gpio_direction_[input|output]
+shall only be called from within respective gpio_direction_[input|output]
+gpiolib implementation.
+
+NOTE that platforms and individual drivers shall *NOT* request GPIO pins to be
+muxed in. Instead, implement a proper gpiolib driver and have that driver
+request proper muxing for its pins.
+
 The function list could become long, especially if you can convert every
 individual pin into a GPIO pin independent of any other pins, and then try
 the approach to define every pin as a function.
@@ -654,19 +747,24 @@ the approach to define every pin as a function.
 In this case, the function array would become 64 entries for each GPIO
 setting and then the device functions.
 
-For this reason there is an additional function a pinmux driver can implement
-to enable only GPIO on an individual pin: .gpio_request_enable(). The same
-.free() function as for other functions is assumed to be usable also for
-GPIO pins.
+For this reason there are two functions a pinmux driver can implement
+to enable only GPIO on an individual pin: .gpio_request_enable() and
+.gpio_disable_free().
 
 This function will pass in the affected GPIO range identified by the pin
 controller core, so you know which GPIO pins are being affected by the request
 operation.
 
-Alternatively it is fully allowed to use named functions for each GPIO
-pin, the pinmux_request_gpio() will attempt to obtain the function "gpioN"
-where "N" is the global GPIO pin number if no special GPIO-handler is
-registered.
+If your driver needs to have an indication from the framework of whether the
+GPIO pin shall be used for input or output you can implement the
+.gpio_set_direction() function. As described this shall be called from the
+gpiolib driver and the affected GPIO range, pin offset and desired direction
+will be passed along to this function.
+
+Alternatively to using these special functions, it is fully allowed to use
+named functions for each GPIO pin, the pinmux_request_gpio() will attempt to
+obtain the function "gpioN" where "N" is the global GPIO pin number if no
+special GPIO-handler is registered.
 
 
 Pinmux board/machine configuration
@@ -683,19 +781,19 @@ spi on the second function mapping:
 
 #include <linux/pinctrl/machine.h>
 
-static struct pinmux_map pmx_mapping[] = {
+static const struct pinmux_map __initdata pmx_mapping[] = {
        {
-               .ctrl_dev_name = "pinctrl.0",
+               .ctrl_dev_name = "pinctrl-foo",
                .function = "spi0",
                .dev_name = "foo-spi.0",
        },
        {
-               .ctrl_dev_name = "pinctrl.0",
+               .ctrl_dev_name = "pinctrl-foo",
                .function = "i2c0",
                .dev_name = "foo-i2c.0",
        },
        {
-               .ctrl_dev_name = "pinctrl.0",
+               .ctrl_dev_name = "pinctrl-foo",
                .function = "mmc0",
                .dev_name = "foo-mmc.0",
        },
@@ -714,14 +812,14 @@ for example if they are not yet instantiated or cumbersome to obtain.
 
 You register this pinmux mapping to the pinmux subsystem by simply:
 
-       ret = pinmux_register_mappings(&pmx_mapping, ARRAY_SIZE(pmx_mapping));
+       ret = pinmux_register_mappings(pmx_mapping, ARRAY_SIZE(pmx_mapping));
 
 Since the above construct is pretty common there is a helper macro to make
-it even more compact which assumes you want to use pinctrl.0 and position
+it even more compact which assumes you want to use pinctrl-foo and position
 0 for mapping, for example:
 
-static struct pinmux_map pmx_mapping[] = {
-       PINMUX_MAP_PRIMARY("I2CMAP", "i2c0", "foo-i2c.0"),
+static struct pinmux_map __initdata pmx_mapping[] = {
+       PINMUX_MAP("I2CMAP", "pinctrl-foo", "i2c0", "foo-i2c.0"),
 };
 
 
@@ -734,14 +832,14 @@ As it is possible to map a function to different groups of pins an optional
 ...
 {
        .name = "spi0-pos-A",
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "spi0",
        .group = "spi0_0_grp",
        .dev_name = "foo-spi.0",
 },
 {
        .name = "spi0-pos-B",
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "spi0",
        .group = "spi0_1_grp",
        .dev_name = "foo-spi.0",
@@ -760,44 +858,44 @@ case), we define a mapping like this:
 ...
 {
        .name "2bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_0_grp",
+       .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
        .name "4bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_0_grp",
+       .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
        .name "4bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_1_grp",
+       .group = "mmc0_2_grp",
        .dev_name = "foo-mmc.0",
 },
 {
        .name "8bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_0_grp",
+       .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
        .name "8bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_1_grp",
+       .group = "mmc0_2_grp",
        .dev_name = "foo-mmc.0",
 },
 {
        .name "8bit"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
-       .group = "mmc0_2_grp",
+       .group = "mmc0_3_grp",
        .dev_name = "foo-mmc.0",
 },
 ...
@@ -898,7 +996,7 @@ like this:
 
 {
        .name "POWERMAP"
-       .ctrl_dev_name = "pinctrl.0",
+       .ctrl_dev_name = "pinctrl-foo",
        .function = "power_func",
        .hog_on_boot = true,
 },
index 3f8b528f237e17626142dfba88f955dfee7a11b3..e272d9909e393dd4091575555a82173e90694fdc 100644 (file)
@@ -12,7 +12,7 @@ Drivers can register a regulator by calling :-
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        struct device *dev, struct regulator_init_data *init_data,
-       void *driver_data);
+       void *driver_data, struct device_node *of_node);
 
 This will register the regulators capabilities and operations to the regulator
 core.
index aa82ee4a5a8762ef9528ef382b07b906c3d6094b..194800410061b3b73be4a68b762be0b7995f4558 100644 (file)
@@ -40,8 +40,8 @@ but the call_site can usually be used to extrapolate that information.
 ==================
 mm_page_alloc            page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
-mm_page_free_direct      page=%p pfn=%lu order=%d
-mm_pagevec_free                  page=%p pfn=%lu order=%d cold=%d
+mm_page_free             page=%p pfn=%lu order=%d
+mm_page_free_batched     page=%p pfn=%lu order=%d cold=%d
 
 These four events deal with page allocation and freeing. mm_page_alloc is
 a simple indicator of page allocator activity. Pages may be allocated from
@@ -53,13 +53,13 @@ amounts of activity imply high activity on the zone->lock. Taking this lock
 impairs performance by disabling interrupts, dirtying cache lines between
 CPUs and serialising many CPUs.
 
-When a page is freed directly by the caller, the mm_page_free_direct event
+When a page is freed directly by the caller, the mm_page_free event
 is triggered. Significant amounts of activity here could indicate that the
 callers should be batching their activities.
 
-When pages are freed using a pagevec, the mm_pagevec_free is
-triggered. Broadly speaking, pages are taken off the LRU lock in bulk and
-freed in batch with a pagevec. Significant amounts of activity here could
+When pages are freed in batch, the mm_page_free_batched event is triggered.
+Broadly speaking, pages are taken off the LRU lock in bulk and
+freed in batch with a page list. Significant amounts of activity here could
 indicate that the system is under memory pressure and can also indicate
 contention on the zone->lru_lock.
 
index 7df50e8cf4d9510a7c0b8233ac52bdbcc3b8614f..0a120aae33ce5c9836dad948b18736ea183e6801 100644 (file)
@@ -17,8 +17,8 @@ use Getopt::Long;
 
 # Tracepoint events
 use constant MM_PAGE_ALLOC             => 1;
-use constant MM_PAGE_FREE_DIRECT       => 2;
-use constant MM_PAGEVEC_FREE           => 3;
+use constant MM_PAGE_FREE              => 2;
+use constant MM_PAGE_FREE_BATCHED      => 3;
 use constant MM_PAGE_PCPU_DRAIN                => 4;
 use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5;
 use constant MM_PAGE_ALLOC_EXTFRAG     => 6;
@@ -223,10 +223,10 @@ EVENT_PROCESS:
                # Perl Switch() sucks majorly
                if ($tracepoint eq "mm_page_alloc") {
                        $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++;
-               } elsif ($tracepoint eq "mm_page_free_direct") {
-                       $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++;
-               } elsif ($tracepoint eq "mm_pagevec_free") {
-                       $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++;
+               } elsif ($tracepoint eq "mm_page_free") {
+                       $perprocesspid{$process_pid}->{MM_PAGE_FREE}++;
+               } elsif ($tracepoint eq "mm_page_free_batched") {
+                       $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}++;
                } elsif ($tracepoint eq "mm_page_pcpu_drain") {
                        $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++;
                        $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++;
@@ -336,8 +336,8 @@ sub dump_stats {
                        $process_pid,
                        $stats{$process_pid}->{MM_PAGE_ALLOC},
                        $stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED},
-                       $stats{$process_pid}->{MM_PAGE_FREE_DIRECT},
-                       $stats{$process_pid}->{MM_PAGEVEC_FREE},
+                       $stats{$process_pid}->{MM_PAGE_FREE},
+                       $stats{$process_pid}->{MM_PAGE_FREE_BATCHED},
                        $stats{$process_pid}->{MM_PAGE_PCPU_DRAIN},
                        $stats{$process_pid}->{HIGH_PCPU_DRAINS},
                        $stats{$process_pid}->{HIGH_PCPU_REFILLS},
@@ -364,8 +364,8 @@ sub aggregate_perprocesspid() {
 
                $perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC};
                $perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED};
-               $perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT};
-               $perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE};
+               $perprocess{$process}->{MM_PAGE_FREE} += $perprocesspid{$process_pid}->{MM_PAGE_FREE};
+               $perprocess{$process}->{MM_PAGE_FREE_BATCHED} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED};
                $perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN};
                $perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS};
                $perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};
index 87bee3c129ba71f8c359b5e849bafd177f421136..058cc6c9dc56d442e4fad72c1957a82c7f630f4d 100644 (file)
@@ -93,14 +93,14 @@ By specifying the -a switch and analysing sleep, the system-wide events
 for a duration of time can be examined.
 
  $ perf stat -a \
-       -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
-       -e kmem:mm_pagevec_free \
+       -e kmem:mm_page_alloc -e kmem:mm_page_free \
+       -e kmem:mm_page_free_batched \
        sleep 10
  Performance counter stats for 'sleep 10':
 
            9630  kmem:mm_page_alloc
-           2143  kmem:mm_page_free_direct
-           7424  kmem:mm_pagevec_free
+           2143  kmem:mm_page_free
+           7424  kmem:mm_page_free_batched
 
    10.002577764  seconds time elapsed
 
@@ -119,15 +119,15 @@ basis using set_ftrace_pid.
 Events can be activated and tracked for the duration of a process on a local
 basis using PCL such as follows.
 
-  $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
-                -e kmem:mm_pagevec_free ./hackbench 10
+  $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free \
+                -e kmem:mm_page_free_batched ./hackbench 10
   Time: 0.909
 
     Performance counter stats for './hackbench 10':
 
           17803  kmem:mm_page_alloc
-          12398  kmem:mm_page_free_direct
-           4827  kmem:mm_pagevec_free
+          12398  kmem:mm_page_free
+           4827  kmem:mm_page_free_batched
 
     0.973913387  seconds time elapsed
 
@@ -146,8 +146,8 @@ to know what the standard deviation is. By and large, this is left to the
 performance analyst to do it by hand. In the event that the discrete event
 occurrences are useful to the performance analyst, then perf can be used.
 
-  $ perf stat --repeat 5 -e kmem:mm_page_alloc -e kmem:mm_page_free_direct
-                       -e kmem:mm_pagevec_free ./hackbench 10
+  $ perf stat --repeat 5 -e kmem:mm_page_alloc -e kmem:mm_page_free
+                       -e kmem:mm_page_free_batched ./hackbench 10
   Time: 0.890
   Time: 0.895
   Time: 0.915
@@ -157,8 +157,8 @@ occurrences are useful to the performance analyst, then perf can be used.
    Performance counter stats for './hackbench 10' (5 runs):
 
           16630  kmem:mm_page_alloc         ( +-   3.542% )
-          11486  kmem:mm_page_free_direct   ( +-   4.771% )
-           4730  kmem:mm_pagevec_free       ( +-   2.325% )
+          11486  kmem:mm_page_free         ( +-   4.771% )
+           4730  kmem:mm_page_free_batched  ( +-   2.325% )
 
     0.982653002  seconds time elapsed   ( +-   1.448% )
 
@@ -168,15 +168,15 @@ aggregation of discrete events, then a script would need to be developed.
 Using --repeat, it is also possible to view how events are fluctuating over
 time on a system-wide basis using -a and sleep.
 
-  $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
-               -e kmem:mm_pagevec_free \
+  $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free \
+               -e kmem:mm_page_free_batched \
                -a --repeat 10 \
                sleep 1
   Performance counter stats for 'sleep 1' (10 runs):
 
            1066  kmem:mm_page_alloc         ( +-  26.148% )
-            182  kmem:mm_page_free_direct   ( +-   5.464% )
-            890  kmem:mm_pagevec_free       ( +-  30.079% )
+            182  kmem:mm_page_free          ( +-   5.464% )
+            890  kmem:mm_page_free_batched  ( +-  30.079% )
 
     1.002251757  seconds time elapsed   ( +-   0.005% )
 
@@ -220,8 +220,8 @@ were generating events within the kernel. To begin this sort of analysis, the
 data must be recorded. At the time of writing, this required root:
 
   $ perf record -c 1 \
-       -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
-       -e kmem:mm_pagevec_free \
+       -e kmem:mm_page_alloc -e kmem:mm_page_free \
+       -e kmem:mm_page_free_batched \
        ./hackbench 10
   Time: 0.894
   [ perf record: Captured and wrote 0.733 MB perf.data (~32010 samples) ]
@@ -260,8 +260,8 @@ noticed that X was generating an insane amount of page allocations so let's look
 at it:
 
   $ perf record -c 1 -f \
-               -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
-               -e kmem:mm_pagevec_free \
+               -e kmem:mm_page_alloc -e kmem:mm_page_free \
+               -e kmem:mm_page_free_batched \
                -p `pidof X`
 
 This was interrupted after a few seconds and
index e2a4b5287361d25c0800954cbc79eccea88291d3..e1d94bf4056e77ab6df7dbc6d9c72b35f183190a 100644 (file)
@@ -1466,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
 an RMA, or 1 if the processor can use an RMA but doesn't require it,
 because it supports the Virtual RMA (VRMA) facility.
 
+4.64 KVM_NMI
+
+Capability: KVM_CAP_USER_NMI
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an NMI on the thread's vcpu.  Note this is well defined only
+when KVM_CREATE_IRQCHIP has not been called, since this is an interface
+between the virtual cpu core and virtual local APIC.  After KVM_CREATE_IRQCHIP
+has been called, this interface is completely emulated within the kernel.
+
+To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
+following algorithm:
+
+  - pause the vcpu
+  - read the local APIC's state (KVM_GET_LAPIC)
+  - check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
+  - if so, issue KVM_NMI
+  - resume the vcpu
+
+Some guests configure the LINT1 NMI input to cause a panic, aiding in
+debugging.
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
index fc51128071c27c332f65c254806e9b1f0f95992e..fc9082a1477a43f5881a899ad5a33f6a77c2f030 100644 (file)
@@ -1,5 +1,7 @@
 00-INDEX
        - this file.
+convert_drivers_to_kernel_api.txt
+       - how-to for converting old watchdog drivers to the new kernel API.
 hpwdt.txt
        - information on the HP iLO2 NMI watchdog
 pcwd-watchdog.txt
index ae1e90036d0687ca0fbbeae6aa803483336c7d4a..be8119bb15d209f98012c24159d0ff2b78c0eebf 100644 (file)
@@ -163,6 +163,25 @@ Here is a simple example for a watchdog device:
 +};
 
 
+Handle the 'nowayout' feature
+-----------------------------
+
+A few drivers use nowayout statically, i.e. there is no module parameter for it
+and only CONFIG_WATCHDOG_NOWAYOUT determines if the feature is going to be
+used. This needs to be converted by initializing the status variable of the
+watchdog_device like this:
+
+        .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+
+Most drivers, however, also allow runtime configuration of nowayout, usually
+by adding a module parameter. The conversion for this would be something like:
+
+       watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
+The module parameter itself needs to stay, everything else related to nowayout
+can go, though. This will likely be some code in open(), close() or write().
+
+
 Register the watchdog device
 ----------------------------
 
index 4f7c894244d2438cb1d2a7d943136e26e46705ec..4b93c28e35c672925d88e5460c0a1c4cb6da7ca7 100644 (file)
@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 22-Jul-2011
+Last reviewed: 29-Nov-2011
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -142,6 +142,14 @@ bit-operations. The status bits that are defined are:
 * WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
   If this bit is set then the watchdog timer will not be able to stop.
 
+  To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
+  timer device) you can either:
+  * set it statically in your watchdog_device struct with
+       .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+    (this will set the value the same as CONFIG_WATCHDOG_NOWAYOUT) or
+  * use the following helper function:
+  static inline void watchdog_set_nowayout(struct watchdog_device *wdd, int nowayout)
+
 Note: The WatchDog Timer Driver Core supports the magic close feature and
 the nowayout feature. To use the magic close feature you must set the
 WDIOF_MAGICCLOSE bit in the options field of the watchdog's info structure.
index 0ae41c9a6c1382ea96fd750b36b0afacda0bd2fe..311b0c4055721888b7415d8ce1cef1eb7280a3a5 100644 (file)
@@ -342,7 +342,7 @@ S:  Supported
 F:     drivers/mfd/adp5520.c
 F:     drivers/video/backlight/adp5520_bl.c
 F:     drivers/leds/leds-adp5520.c
-F:     drivers/gpio/adp5520-gpio.c
+F:     drivers/gpio/gpio-adp5520.c
 F:     drivers/input/keyboard/adp5520-keys.c
 
 ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
@@ -351,7 +351,7 @@ L:  device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/ADP5588
 S:     Supported
 F:     drivers/input/keyboard/adp5588-keys.c
-F:     drivers/gpio/adp5588-gpio.c
+F:     drivers/gpio/gpio-adp5588.c
 
 ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
 M:     Michael Hennerich <michael.hennerich@analog.com>
@@ -914,7 +914,6 @@ M:  Lennert Buytenhek <kernel@wantstofly.org>
 M:     Nicolas Pitre <nico@fluxnic.net>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Odd Fixes
-F:     arch/arm/mach-loki/
 F:     arch/arm/mach-kirkwood/
 F:     arch/arm/mach-mv78xx0/
 F:     arch/arm/mach-orion5x/
@@ -1076,8 +1075,8 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-s5pv210/mach-aquila.c
 F:     arch/arm/mach-s5pv210/mach-goni.c
-F:     arch/arm/mach-exynos4/mach-universal_c210.c
-F:     arch/arm/mach-exynos4/mach-nuri.c
+F:     arch/arm/mach-exynos/mach-universal_c210.c
+F:     arch/arm/mach-exynos/mach-nuri.c
 
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:     Kyungmin Park <kyungmin.park@samsung.com>
@@ -1105,7 +1104,6 @@ M:        Tomasz Stanislawski <t.stanislaws@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-media@vger.kernel.org
 S:     Maintained
-F:     arch/arm/plat-s5p/dev-tv.c
 F:     drivers/media/video/s5p-tv/
 
 ARM/SHMOBILE ARM ARCHITECTURE
@@ -1140,14 +1138,13 @@ L:      linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.mcuos.com
 S:     Maintained
 F:     arch/arm/mach-w90x900/
-F:     arch/arm/mach-nuc93x/
 F:     drivers/input/keyboard/w90p910_keypad.c
 F:     drivers/input/touchscreen/w90p910_ts.c
 F:     drivers/watchdog/nuc900_wdt.c
 F:     drivers/net/ethernet/nuvoton/w90p910_ether.c
 F:     drivers/mtd/nand/nuc900_nand.c
 F:     drivers/rtc/rtc-nuc900.c
-F:     drivers/spi/spi_nuc900.c
+F:     drivers/spi/spi-nuc900.c
 F:     drivers/usb/host/ehci-w90x900.c
 F:     drivers/video/nuc900fb.c
 
@@ -1172,7 +1169,6 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ux500/
 F:     drivers/dma/ste_dma40*
-F:     drivers/mfd/ab3550*
 F:     drivers/mfd/abx500*
 F:     drivers/mfd/ab8500*
 F:     drivers/mfd/stmpe*
@@ -1352,7 +1348,7 @@ F:        drivers/net/ethernet/cadence/
 ATMEL SPI DRIVER
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
 S:     Supported
-F:     drivers/spi/atmel_spi.*
+F:     drivers/spi/spi-atmel.*
 
 ATMEL USBA UDC DRIVER
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1491,7 +1487,7 @@ M:        Sonic Zhang <sonic.zhang@analog.com>
 L:     uclinux-dist-devel@blackfin.uclinux.org
 W:     http://blackfin.uclinux.org
 S:     Supported
-F:     drivers/tty/serial/bfin_5xx.c
+F:     drivers/tty/serial/bfin_uart.c
 
 BLACKFIN WATCHDOG DRIVER
 M:     Mike Frysinger <vapier.adi@gmail.com>
@@ -1621,7 +1617,7 @@ BT8XXGPIO DRIVER
 M:     Michael Buesch <m@bues.ch>
 W:     http://bu3sch.de/btgpio.php
 S:     Maintained
-F:     drivers/gpio/bt8xxgpio.c
+F:     drivers/gpio/gpio-bt8xx.c
 
 BTRFS FILE SYSTEM
 M:     Chris Mason <chris.mason@oracle.com>
@@ -1649,6 +1645,14 @@ T:       git git://git.alsa-project.org/alsa-kernel.git
 S:     Maintained
 F:     sound/pci/oxygen/
 
+C6X ARCHITECTURE
+M:     Mark Salter <msalter@redhat.com>
+M:     Aurelien Jacquiot <a-jacquiot@ti.com>
+L:     linux-c6x-dev@linux-c6x.org
+W:     http://www.linux-c6x.org/wiki/index.php/Main_Page
+S:     Maintained
+F:     arch/c6x/
+
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
 L:     linux-cachefs@redhat.com
@@ -1662,7 +1666,7 @@ L:        linux-media@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     Documentation/video4linux/cafe_ccic
-F:     drivers/media/video/cafe_ccic*
+F:     drivers/media/video/marvell-ccic/
 
 CAIF NETWORK LAYER
 M:     Sjur Braendeland <sjur.brandeland@stericsson.com>
@@ -2100,7 +2104,7 @@ DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
 L:     netdev@vger.kernel.org
 S:     Orphan
 F:     Documentation/networking/dmfe.txt
-F:     drivers/net/ethernet/tulip/dmfe.c
+F:     drivers/net/ethernet/dec/tulip/dmfe.c
 
 DC390/AM53C974 SCSI driver
 M:     Kurt Garloff <garloff@suse.de>
@@ -2173,6 +2177,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
 F:     drivers/usb/dwc3/
 
+DEVICE FREQUENCY (DEVFREQ)
+M:     MyungJoo Ham <myungjoo.ham@samsung.com>
+M:     Kyungmin Park <kyungmin.park@samsung.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/devfreq/
+
 DEVICE NUMBER REGISTRY
 M:     Torben Mathiasen <device@lanana.org>
 W:     http://lanana.org/docs/device-list/index.html
@@ -2910,7 +2921,7 @@ GRETH 10/100/1G Ethernet MAC device driver
 M:     Kristoffer Glembo <kristoffer@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     drivers/net/greth*
+F:     drivers/net/ethernet/aeroflex/
 
 GSPCA FINEPIX SUBDRIVER
 M:     Frank Zago <frank@zago.net>
@@ -3860,8 +3871,7 @@ L:        keyrings@linux-nfs.org
 S:     Supported
 F:     Documentation/security/keys-trusted-encrypted.txt
 F:     include/keys/encrypted-type.h
-F:     security/keys/encrypted.c
-F:     security/keys/encrypted.h
+F:     security/keys/encrypted-keys/
 
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
@@ -5123,7 +5133,7 @@ F:        drivers/*/*/picoxcell*
 PIN CONTROL SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
 S:     Maintained
-F:     drivers/pinmux/
+F:     drivers/pinctrl/
 
 PKTCDVD DRIVER
 M:     Peter Osterlund <petero2@telia.com>
@@ -5313,7 +5323,7 @@ T:        git git://git.linaro.org/people/ycmiao/pxa-linux.git
 S:     Maintained
 F:     arch/arm/mach-pxa/
 F:     drivers/pcmcia/pxa2xx*
-F:     drivers/spi/pxa2xx*
+F:     drivers/spi/spi-pxa2xx*
 F:     drivers/usb/gadget/pxa2*
 F:     include/sound/pxa2xx-lib.h
 F:     sound/arm/pxa*
@@ -5795,13 +5805,14 @@ L:      linux-mmc@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
 S:     Maintained
 F:     drivers/mmc/host/sdhci.*
+F:     drivers/mmc/host/sdhci-pltfm.[ch]
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
 M:     Anton Vorontsov <avorontsov@ru.mvista.com>
 L:     linuxppc-dev@lists.ozlabs.org
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
-F:     drivers/mmc/host/sdhci-of.*
+F:     drivers/mmc/host/sdhci-pltfm.[ch]
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
 M:     Ben Dooks <ben-linux@fluff.org>
@@ -6180,9 +6191,7 @@ M:        Viresh Kumar <viresh.kumar@st.com>
 W:     http://www.st.com/spear
 S:     Maintained
 F:     arch/arm/mach-spear*/clock.c
-F:     arch/arm/mach-spear*/include/mach/clkdev.h
 F:     arch/arm/plat-spear/clock.c
-F:     arch/arm/plat-spear/include/plat/clkdev.h
 F:     arch/arm/plat-spear/include/plat/clock.h
 
 SPEAR PAD MULTIPLEXING SUPPORT
@@ -6306,7 +6315,7 @@ STAGING - LIRC (LINUX INFRARED REMOTE CONTROL) DRIVERS
 M:     Jarod Wilson <jarod@wilsonet.com>
 W:     http://www.lirc.org/
 S:     Odd Fixes
-F:     drivers/staging/lirc/
+F:     drivers/staging/media/lirc/
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 M:     Julian Andres Klode <jak@jak-linux.org>
@@ -6342,7 +6351,7 @@ F:        drivers/staging/sm7xx/
 STAGING - SOFTLOGIC 6x10 MPEG CODEC
 M:     Ben Collins <bcollins@bluecherry.net>
 S:     Odd Fixes
-F:     drivers/staging/solo6x10/
+F:     drivers/staging/media/solo6x10/
 
 STAGING - SPEAKUP CONSOLE SPEECH DRIVER
 M:     William Hubbs <w.d.hubbs@gmail.com>
@@ -6645,7 +6654,7 @@ TULIP NETWORK DRIVERS
 M:     Grant Grundler <grundler@parisc-linux.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     drivers/net/ethernet/tulip/
+F:     drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver
 M:     Maxim Krasnyansky <maxk@qualcomm.com>
index 3d74801a40157b09e76263ea5af4546e67fedd30..56a4df952fb0f2e68889229133e38b2b34de4716 100644 (file)
@@ -70,10 +70,6 @@ config GENERIC_ISA_DMA
        bool
        default y
 
-config GENERIC_IOMAP
-       bool
-       default n
-
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -319,6 +315,7 @@ config ISA_DMA_API
 config PCI
        bool
        depends on !ALPHA_JENSEN
+       select GENERIC_PCI_IOMAP
        default y
        help
          Find out whether you have a PCI motherboard. PCI is the name of a
index 246100ef07c2474fa5213163b0e1b899d10198a6..04eea4894ef33bc84c60335fc1e79e31553bd328 100644 (file)
@@ -185,15 +185,3 @@ struct dma_map_ops alpha_noop_ops = {
 
 struct dma_map_ops *dma_ops = &alpha_noop_ops;
 EXPORT_SYMBOL(dma_ops);
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
-{
-}
-
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
index c9ab94ee1ca814090249356df8f90b992c59c5c1..f3cae275d3f5e875142c67f89691c891227e2d19 100644 (file)
@@ -508,30 +508,7 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
        return -EOPNOTSUPP;
 }
 
-/* Create an __iomem token from a PCI BAR.  Copied from lib/iomap.c with
-   no changes, since we don't want the other things in that object file.  */
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               /* Not checking IORESOURCE_CACHEABLE because alpha does
-                  not distinguish between ioremap and ioremap_nocache.  */
-               return ioremap(start, len);
-       }
-       return NULL;
-}
-
-/* Destroy that token.  Not copied from lib/iomap.c.  */
+/* Destroy an __iomem token.  Not copied from lib/iomap.c.  */
 
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
@@ -539,7 +516,6 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
                iounmap(addr);
 }
 
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
 
 /* FIXME: Some boxes have multiple ISA bridges! */
index 9d66dfc33a5ac2b2a00ad56dfe130c3032954501..24626b0419ee97e963e68329a8eb6769360b46ea 100644 (file)
@@ -16,6 +16,7 @@ config ARM
        select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
        select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
        select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
+       select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
@@ -30,6 +31,7 @@ config ARM
        select HAVE_SPARSE_IRQ
        select GENERIC_IRQ_SHOW
        select CPU_PM if (SUSPEND || CPU_IDLE)
+       select GENERIC_PCI_IOMAP
        help
          The ARM series is a line of low-power-consumption RISC chip designs
          licensed by ARM Ltd and targeted at embedded applications and
index 065d100fa63e7d608f49d3ef7f3a629d86cfe057..9275828feb3df24f481934a080572f8d53be3136 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm/system.h>
+#include <asm-generic/pci_iomap.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -306,7 +307,6 @@ extern void ioport_unmap(void __iomem *addr);
 
 struct pci_dev;
 
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 
 /*
index f8a682f60a4208defb9daeae1c4d105b4ed09230..6b22b543a83f0f6f393a2fbf9e23324c2bc7b9d1 100644 (file)
@@ -127,7 +127,7 @@ static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
        size_t retlen;
 
        if (!strcmp(mtd->name, "MAC-Address")) {
-               mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+               mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
                if (retlen == ETH_ALEN)
                        pr_info("Read MAC addr from SPI Flash: %pM\n",
                                mac_addr);
index 35d5dffab7e11c4fa3f4efefe2397d3cf429dc5d..46dfd1ae8f71a6001f90ecdcea237103f906dcc4 100644 (file)
@@ -28,6 +28,7 @@
 #include <plat/board.h>
 #include <plat/mcbsp.h>
 #include <plat/mmc.h>
+#include <plat/iommu.h>
 #include <plat/dma.h>
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
@@ -211,9 +212,15 @@ static struct platform_device omap3isp_device = {
        .resource       = omap3isp_resources,
 };
 
+static struct omap_iommu_arch_data omap3_isp_iommu = {
+       .name = "isp",
+};
+
 int omap3_init_camera(struct isp_platform_data *pdata)
 {
        omap3isp_device.dev.platform_data = pdata;
+       omap3isp_device.dev.archdata.iommu = &omap3_isp_iommu;
+
        return platform_device_register(&omap3isp_device);
 }
 
index 4f3572a1c684422bbf00e4c2990d472285c7fba8..20bb0545f99208f4126d61afcf53671ba261afb2 100644 (file)
@@ -53,6 +53,7 @@ struct tegra_kbc_platform_data {
        struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
        const struct matrix_keymap_data *keymap_data;
 
+       u32 wakeup_key;
        bool wakeup;
        bool use_fn_map;
        bool use_ghost_filter;
index 1cbcd4fc1e17f418b50a03d22839aee72b50a57d..54d8f34fdee5d1d2770be32884011aac3949951f 100644 (file)
@@ -7,8 +7,8 @@ comment "ST-Ericsson Mobile Platform Products"
 config MACH_U300
        bool "U300"
        select PINCTRL
-       select PINMUX_U300
-       select GPIO_U300
+       select PINCTRL_U300
+       select PINCTRL_COH901
 
 comment "ST-Ericsson U300/U330/U335/U365 Feature Selections"
 
index 697930761b3ec1f3b7cb8ce3a31a6557b91fe39a..b4c6926a700cc30c8187768d0a629e590b1d3417 100644 (file)
@@ -1605,15 +1605,15 @@ static struct platform_device pinmux_device = {
 };
 
 /* Pinmux settings */
-static struct pinmux_map u300_pinmux_map[] = {
+static struct pinmux_map __initdata u300_pinmux_map[] = {
        /* anonymous maps for chip power and EMIFs */
-       PINMUX_MAP_PRIMARY_SYS_HOG("POWER", "power"),
-       PINMUX_MAP_PRIMARY_SYS_HOG("EMIF0", "emif0"),
-       PINMUX_MAP_PRIMARY_SYS_HOG("EMIF1", "emif1"),
+       PINMUX_MAP_SYS_HOG("POWER", "pinmux-u300", "power"),
+       PINMUX_MAP_SYS_HOG("EMIF0", "pinmux-u300", "emif0"),
+       PINMUX_MAP_SYS_HOG("EMIF1", "pinmux-u300", "emif1"),
        /* per-device maps for MMC/SD, SPI and UART */
-       PINMUX_MAP_PRIMARY("MMCSD", "mmc0", "mmci"),
-       PINMUX_MAP_PRIMARY("SPI", "spi0", "pl022"),
-       PINMUX_MAP_PRIMARY("UART0", "uart0", "uart0"),
+       PINMUX_MAP("MMCSD", "pinmux-u300", "mmc0", "mmci"),
+       PINMUX_MAP("SPI", "pinmux-u300", "spi0", "pl022"),
+       PINMUX_MAP("UART0", "pinmux-u300", "uart0", "uart0"),
 };
 
 struct u300_mux_hog {
index 0c2b2021951a80e4127e56a9b2805fada9927cb8..bf4c7935aecdf7d6925b24376e0affd719dea5d9 100644 (file)
@@ -9,121 +9,6 @@
 #ifndef __MACH_U300_GPIO_U300_H
 #define __MACH_U300_GPIO_U300_H
 
-/*
- * Individual pin assignments for the B26/S26. Notice that the
- * actual usage of these pins depends on the PAD MUX settings, that
- * is why the same number can potentially appear several times.
- * In the reference design each pin is only used for one purpose.
- * These were determined by inspecting the B26/S26 schematic:
- * 2/1911-ROA 128 1603
- */
-#ifdef CONFIG_MACH_U300_BS2X
-#define U300_GPIO_PIN_UART_RX          0
-#define U300_GPIO_PIN_UART_TX          1
-#define U300_GPIO_PIN_GPIO02           2  /* Unrouted */
-#define U300_GPIO_PIN_GPIO03           3  /* Unrouted */
-#define U300_GPIO_PIN_CAM_SLEEP                4
-#define U300_GPIO_PIN_CAM_REG_EN       5
-#define U300_GPIO_PIN_GPIO06           6  /* Unrouted */
-#define U300_GPIO_PIN_GPIO07           7  /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO08           8  /* Service point SP2321 */
-#define U300_GPIO_PIN_GPIO09           9  /* Service point SP2322 */
-#define U300_GPIO_PIN_PHFSENSE         10 /* Headphone jack sensing */
-#define U300_GPIO_PIN_MMC_CLKRET       11 /* Clock return from MMC/SD card */
-#define U300_GPIO_PIN_MMC_CD           12 /* MMC Card insertion detection */
-#define U300_GPIO_PIN_FLIPSENSE                13 /* Mechanical flip sensing */
-#define U300_GPIO_PIN_GPIO14           14 /* DSP JTAG Port RTCK */
-#define U300_GPIO_PIN_GPIO15           15 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO16           16 /* Unrouted */
-#define U300_GPIO_PIN_GPIO17           17 /* Unrouted */
-#define U300_GPIO_PIN_GPIO18           18 /* Unrouted */
-#define U300_GPIO_PIN_GPIO19           19 /* Unrouted */
-#define U300_GPIO_PIN_GPIO20           20 /* Unrouted */
-#define U300_GPIO_PIN_GPIO21           21 /* Unrouted */
-#define U300_GPIO_PIN_GPIO22           22 /* Unrouted */
-#define U300_GPIO_PIN_GPIO23           23 /* Unrouted */
-#endif
-
-/*
- * Individual pin assignments for the B330/S330 and B365/S365.
- * Notice that the actual usage of these pins depends on the
- * PAD MUX settings, that is why the same number can potentially
- * appear several times. In the reference design each pin is only
- * used for one purpose. These were determined by inspecting the
- * S365 schematic.
- */
-#if defined(CONFIG_MACH_U300_BS330) || defined(CONFIG_MACH_U300_BS365) || \
-    defined(CONFIG_MACH_U300_BS335)
-#define U300_GPIO_PIN_UART_RX          0
-#define U300_GPIO_PIN_UART_TX          1
-#define U300_GPIO_PIN_UART_CTS         2
-#define U300_GPIO_PIN_UART_RTS         3
-#define U300_GPIO_PIN_CAM_MAIN_STANDBY 4 /* Camera MAIN standby */
-#define U300_GPIO_PIN_GPIO05           5 /* Unrouted */
-#define U300_GPIO_PIN_MS_CD            6 /* Memory Stick Card insertion */
-#define U300_GPIO_PIN_GPIO07           7 /* Test point TP2430 */
-
-#define U300_GPIO_PIN_GPIO08           8 /* Test point TP2437 */
-#define U300_GPIO_PIN_GPIO09           9 /* Test point TP2431 */
-#define U300_GPIO_PIN_GPIO10           10 /* Test point TP2432 */
-#define U300_GPIO_PIN_MMC_CLKRET       11 /* Clock return from MMC/SD card */
-#define U300_GPIO_PIN_MMC_CD           12 /* MMC Card insertion detection */
-#define U300_GPIO_PIN_CAM_SUB_STANDBY  13 /* Camera SUB standby */
-#define U300_GPIO_PIN_GPIO14           14 /* Test point TP2436 */
-#define U300_GPIO_PIN_GPIO15           15 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO16           16 /* Test point TP2438 */
-#define U300_GPIO_PIN_PHFSENSE         17 /* Headphone jack sensing */
-#define U300_GPIO_PIN_GPIO18           18 /* Test point TP2439 */
-#define U300_GPIO_PIN_GPIO19           19 /* Routed somewhere */
-#define U300_GPIO_PIN_GPIO20           20 /* Unrouted */
-#define U300_GPIO_PIN_GPIO21           21 /* Unrouted */
-#define U300_GPIO_PIN_GPIO22           22 /* Unrouted */
-#define U300_GPIO_PIN_GPIO23           23 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO24           24 /* Unrouted */
-#define U300_GPIO_PIN_GPIO25           25 /* Unrouted */
-#define U300_GPIO_PIN_GPIO26           26 /* Unrouted */
-#define U300_GPIO_PIN_GPIO27           27 /* Unrouted */
-#define U300_GPIO_PIN_GPIO28           28 /* Unrouted */
-#define U300_GPIO_PIN_GPIO29           29 /* Unrouted */
-#define U300_GPIO_PIN_GPIO30           30 /* Unrouted */
-#define U300_GPIO_PIN_GPIO31           31 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO32           32 /* Unrouted */
-#define U300_GPIO_PIN_GPIO33           33 /* Unrouted */
-#define U300_GPIO_PIN_GPIO34           34 /* Unrouted */
-#define U300_GPIO_PIN_GPIO35           35 /* Unrouted */
-#define U300_GPIO_PIN_GPIO36           36 /* Unrouted */
-#define U300_GPIO_PIN_GPIO37           37 /* Unrouted */
-#define U300_GPIO_PIN_GPIO38           38 /* Unrouted */
-#define U300_GPIO_PIN_GPIO39           39 /* Unrouted */
-
-#ifdef CONFIG_MACH_U300_BS335
-
-#define U300_GPIO_PIN_GPIO40           40 /* Unrouted */
-#define U300_GPIO_PIN_GPIO41           41 /* Unrouted */
-#define U300_GPIO_PIN_GPIO42           42 /* Unrouted */
-#define U300_GPIO_PIN_GPIO43           43 /* Unrouted */
-#define U300_GPIO_PIN_GPIO44           44 /* Unrouted */
-#define U300_GPIO_PIN_GPIO45           45 /* Unrouted */
-#define U300_GPIO_PIN_GPIO46           46 /* Unrouted */
-#define U300_GPIO_PIN_GPIO47           47 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO48           48 /* Unrouted */
-#define U300_GPIO_PIN_GPIO49           49 /* Unrouted */
-#define U300_GPIO_PIN_GPIO50           50 /* Unrouted */
-#define U300_GPIO_PIN_GPIO51           51 /* Unrouted */
-#define U300_GPIO_PIN_GPIO52           52 /* Unrouted */
-#define U300_GPIO_PIN_GPIO53           53 /* Unrouted */
-#define U300_GPIO_PIN_GPIO54           54 /* Unrouted */
-#define U300_GPIO_PIN_GPIO55           55 /* Unrouted */
-#endif
-
-#endif
-
 /**
  * enum u300_gpio_variant - the type of U300 GPIO employed
  */
index db3fbfa1d6e97749e43ccb19d060c99a81fadb35..ee78a26707ebe595f00265ccbc3c09a52ea4c1d3 100644 (file)
 #endif
 
 /* Maximum 8*7 GPIO lines */
-#ifdef CONFIG_GPIO_U300
+#ifdef CONFIG_PINCTRL_COH901
 #define IRQ_U300_GPIO_BASE             (U300_VIC_IRQS_END)
 #define IRQ_U300_GPIO_END              (IRQ_U300_GPIO_BASE + 56)
 #else
index 4d482aacc2720e82c14238f69f247985339e93f2..05abd6ad9fabbf2a00daf4aaff273884e71caa2d 100644 (file)
@@ -18,8 +18,8 @@
 #include <linux/slab.h>
 #include <mach/coh901318.h>
 #include <mach/dma_channels.h>
-#include <mach/gpio-u300.h>
 
+#include "u300-gpio.h"
 #include "mmc.h"
 
 static struct mmci_platform_data mmc0_plat_data = {
diff --git a/arch/arm/mach-u300/u300-gpio.h b/arch/arm/mach-u300/u300-gpio.h
new file mode 100644 (file)
index 0000000..847dc25
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Individual pin assignments for the B26/S26. Notice that the
+ * actual usage of these pins depends on the PAD MUX settings, that
+ * is why the same number can potentially appear several times.
+ * In the reference design each pin is only used for one purpose.
+ * These were determined by inspecting the B26/S26 schematic:
+ * 2/1911-ROA 128 1603
+ */
+#ifdef CONFIG_MACH_U300_BS2X
+#define U300_GPIO_PIN_UART_RX          0
+#define U300_GPIO_PIN_UART_TX          1
+#define U300_GPIO_PIN_GPIO02           2  /* Unrouted */
+#define U300_GPIO_PIN_GPIO03           3  /* Unrouted */
+#define U300_GPIO_PIN_CAM_SLEEP                4
+#define U300_GPIO_PIN_CAM_REG_EN       5
+#define U300_GPIO_PIN_GPIO06           6  /* Unrouted */
+#define U300_GPIO_PIN_GPIO07           7  /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO08           8  /* Service point SP2321 */
+#define U300_GPIO_PIN_GPIO09           9  /* Service point SP2322 */
+#define U300_GPIO_PIN_PHFSENSE         10 /* Headphone jack sensing */
+#define U300_GPIO_PIN_MMC_CLKRET       11 /* Clock return from MMC/SD card */
+#define U300_GPIO_PIN_MMC_CD           12 /* MMC Card insertion detection */
+#define U300_GPIO_PIN_FLIPSENSE                13 /* Mechanical flip sensing */
+#define U300_GPIO_PIN_GPIO14           14 /* DSP JTAG Port RTCK */
+#define U300_GPIO_PIN_GPIO15           15 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO16           16 /* Unrouted */
+#define U300_GPIO_PIN_GPIO17           17 /* Unrouted */
+#define U300_GPIO_PIN_GPIO18           18 /* Unrouted */
+#define U300_GPIO_PIN_GPIO19           19 /* Unrouted */
+#define U300_GPIO_PIN_GPIO20           20 /* Unrouted */
+#define U300_GPIO_PIN_GPIO21           21 /* Unrouted */
+#define U300_GPIO_PIN_GPIO22           22 /* Unrouted */
+#define U300_GPIO_PIN_GPIO23           23 /* Unrouted */
+#endif
+
+/*
+ * Individual pin assignments for the B330/S330 and B365/S365.
+ * Notice that the actual usage of these pins depends on the
+ * PAD MUX settings, that is why the same number can potentially
+ * appear several times. In the reference design each pin is only
+ * used for one purpose. These were determined by inspecting the
+ * S365 schematic.
+ */
+#if defined(CONFIG_MACH_U300_BS330) || defined(CONFIG_MACH_U300_BS365) || \
+    defined(CONFIG_MACH_U300_BS335)
+#define U300_GPIO_PIN_UART_RX          0
+#define U300_GPIO_PIN_UART_TX          1
+#define U300_GPIO_PIN_UART_CTS         2
+#define U300_GPIO_PIN_UART_RTS         3
+#define U300_GPIO_PIN_CAM_MAIN_STANDBY 4 /* Camera MAIN standby */
+#define U300_GPIO_PIN_GPIO05           5 /* Unrouted */
+#define U300_GPIO_PIN_MS_CD            6 /* Memory Stick Card insertion */
+#define U300_GPIO_PIN_GPIO07           7 /* Test point TP2430 */
+
+#define U300_GPIO_PIN_GPIO08           8 /* Test point TP2437 */
+#define U300_GPIO_PIN_GPIO09           9 /* Test point TP2431 */
+#define U300_GPIO_PIN_GPIO10           10 /* Test point TP2432 */
+#define U300_GPIO_PIN_MMC_CLKRET       11 /* Clock return from MMC/SD card */
+#define U300_GPIO_PIN_MMC_CD           12 /* MMC Card insertion detection */
+#define U300_GPIO_PIN_CAM_SUB_STANDBY  13 /* Camera SUB standby */
+#define U300_GPIO_PIN_GPIO14           14 /* Test point TP2436 */
+#define U300_GPIO_PIN_GPIO15           15 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO16           16 /* Test point TP2438 */
+#define U300_GPIO_PIN_PHFSENSE         17 /* Headphone jack sensing */
+#define U300_GPIO_PIN_GPIO18           18 /* Test point TP2439 */
+#define U300_GPIO_PIN_GPIO19           19 /* Routed somewhere */
+#define U300_GPIO_PIN_GPIO20           20 /* Unrouted */
+#define U300_GPIO_PIN_GPIO21           21 /* Unrouted */
+#define U300_GPIO_PIN_GPIO22           22 /* Unrouted */
+#define U300_GPIO_PIN_GPIO23           23 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO24           24 /* Unrouted */
+#define U300_GPIO_PIN_GPIO25           25 /* Unrouted */
+#define U300_GPIO_PIN_GPIO26           26 /* Unrouted */
+#define U300_GPIO_PIN_GPIO27           27 /* Unrouted */
+#define U300_GPIO_PIN_GPIO28           28 /* Unrouted */
+#define U300_GPIO_PIN_GPIO29           29 /* Unrouted */
+#define U300_GPIO_PIN_GPIO30           30 /* Unrouted */
+#define U300_GPIO_PIN_GPIO31           31 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO32           32 /* Unrouted */
+#define U300_GPIO_PIN_GPIO33           33 /* Unrouted */
+#define U300_GPIO_PIN_GPIO34           34 /* Unrouted */
+#define U300_GPIO_PIN_GPIO35           35 /* Unrouted */
+#define U300_GPIO_PIN_GPIO36           36 /* Unrouted */
+#define U300_GPIO_PIN_GPIO37           37 /* Unrouted */
+#define U300_GPIO_PIN_GPIO38           38 /* Unrouted */
+#define U300_GPIO_PIN_GPIO39           39 /* Unrouted */
+
+#ifdef CONFIG_MACH_U300_BS335
+
+#define U300_GPIO_PIN_GPIO40           40 /* Unrouted */
+#define U300_GPIO_PIN_GPIO41           41 /* Unrouted */
+#define U300_GPIO_PIN_GPIO42           42 /* Unrouted */
+#define U300_GPIO_PIN_GPIO43           43 /* Unrouted */
+#define U300_GPIO_PIN_GPIO44           44 /* Unrouted */
+#define U300_GPIO_PIN_GPIO45           45 /* Unrouted */
+#define U300_GPIO_PIN_GPIO46           46 /* Unrouted */
+#define U300_GPIO_PIN_GPIO47           47 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO48           48 /* Unrouted */
+#define U300_GPIO_PIN_GPIO49           49 /* Unrouted */
+#define U300_GPIO_PIN_GPIO50           50 /* Unrouted */
+#define U300_GPIO_PIN_GPIO51           51 /* Unrouted */
+#define U300_GPIO_PIN_GPIO52           52 /* Unrouted */
+#define U300_GPIO_PIN_GPIO53           53 /* Unrouted */
+#define U300_GPIO_PIN_GPIO54           54 /* Unrouted */
+#define U300_GPIO_PIN_GPIO55           55 /* Unrouted */
+#endif
+
+#endif
index 430df1a5978d04d198a88f2448c8a2de67c7e963..e62956e1203094bc6daa022486b93bd490c70057 100644 (file)
@@ -35,27 +35,6 @@ EXPORT_SYMBOL(pcibios_min_mem);
 unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC;
 EXPORT_SYMBOL(pci_flags);
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len   = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
        if ((unsigned long)addr >= VMALLOC_START &&
index a1d79ee192503e3c14fd19fddb1bd24af933e0ad..88be3e628b339f49e6943f6fb6ad792546730aa6 100644 (file)
@@ -111,6 +111,32 @@ struct iommu_platform_data {
        u32 da_end;
 };
 
+/**
+ * struct iommu_arch_data - omap iommu private data
+ * @name: name of the iommu device
+ * @iommu_dev: handle of the iommu device
+ *
+ * This is an omap iommu private data object, which binds an iommu user
+ * to its iommu device. This object should be placed at the iommu user's
+ * dev_archdata so generic IOMMU API can be used without having to
+ * utilize omap-specific plumbing anymore.
+ */
+struct omap_iommu_arch_data {
+       const char *name;
+       struct omap_iommu *iommu_dev;
+};
+
+/**
+ * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
+ * @dev: iommu client device
+ */
+static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
+{
+       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+       return arch_data->iommu_dev;
+}
+
 /* IOMMU errors */
 #define OMAP_IOMMU_ERR_TLB_MISS                (1 << 0)
 #define OMAP_IOMMU_ERR_TRANS_FAULT     (1 << 1)
@@ -163,8 +189,8 @@ extern int omap_iommu_set_isr(const char *name,
                                    void *priv),
                         void *isr_priv);
 
-extern void omap_iommu_save_ctx(struct omap_iommu *obj);
-extern void omap_iommu_restore_ctx(struct omap_iommu *obj);
+extern void omap_iommu_save_ctx(struct device *dev);
+extern void omap_iommu_restore_ctx(struct device *dev);
 
 extern int omap_install_iommu_arch(const struct iommu_functions *ops);
 extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
@@ -176,6 +202,5 @@ extern ssize_t
 omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
 extern size_t
 omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
-struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */
index 6af1a91c0f36311996f91f2328cfe84da770064a..498e57cda6cd4babfebd0c474a4a71a101ff064b 100644 (file)
@@ -72,18 +72,18 @@ struct iovm_struct {
 #define IOVMF_DA_FIXED         (1 << (4 + IOVMF_SW_SHIFT))
 
 
-extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da);
+extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);
 extern u32
-omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
                        const struct sg_table *sgt, u32 flags);
 extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
-                               struct omap_iommu *obj, u32 da);
+                               struct device *dev, u32 da);
 extern u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
                                u32 da, size_t bytes, u32 flags);
 extern void
-omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
                                const u32 da);
-extern void *omap_da_to_va(struct omap_iommu *obj, u32 da);
+extern void *omap_da_to_va(struct device *dev, u32 da);
 
 #endif /* __IOMMU_MMAP_H */
index b59a6483cd8a85fa3c5b8253f69a3a22c7753df5..c81ace332a1ea102721e50a3ba2162381193c9ef 100644 (file)
 #ifndef __PLAT_SAMSUNG_KEYPAD_H
 #define __PLAT_SAMSUNG_KEYPAD_H
 
-#include <linux/input/matrix_keypad.h>
-
-#define SAMSUNG_MAX_ROWS       8
-#define SAMSUNG_MAX_COLS       8
-
-/**
- * struct samsung_keypad_platdata - Platform device data for Samsung Keypad.
- * @keymap_data: pointer to &matrix_keymap_data.
- * @rows: number of keypad row supported.
- * @cols: number of keypad col supported.
- * @no_autorepeat: disable key autorepeat.
- * @wakeup: controls whether the device should be set up as wakeup source.
- * @cfg_gpio: configure the GPIO.
- *
- * Initialisation data specific to either the machine or the platform
- * for the device driver to use or call-back when configuring gpio.
- */
-struct samsung_keypad_platdata {
-       const struct matrix_keymap_data *keymap_data;
-       unsigned int rows;
-       unsigned int cols;
-       bool no_autorepeat;
-       bool wakeup;
-
-       void (*cfg_gpio)(unsigned int rows, unsigned int cols);
-};
+#include <linux/input/samsung-keypad.h>
 
 /**
  * samsung_keypad_set_platdata - Set platform data for Samsung Keypad device.
index 5edcb58d6f737f49a69a8dcf9784c2c2935e1dea..0b7039cf07ff18b665e868bb7dc0015e519b930c 100644 (file)
@@ -80,7 +80,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 2e549572d4f59c5ff73ab137c56fd13a4b312dd2..5553205d7cbe962f8aee6d386383e3f28b3cdf24 100644 (file)
@@ -97,7 +97,7 @@ CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_WATCHDOG=y
index ad0881ba30af0ec5bc90974d4c518249b72d33df..d95658fc31276d61d99d9da002fdd0cab45dba74 100644 (file)
@@ -68,7 +68,7 @@ CONFIG_I2C_ALGOBIT=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=400
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 8465b3e6b8628a23b5961471c46de0adf617d1db..498f64a87050fd2572b1fa29f14042bc5477ba1e 100644 (file)
@@ -105,7 +105,7 @@ CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 5e7321b26040665fb4a6a3b74db5c82208af2107..72e0317565ef407bbd613e3057717482f0ba089d 100644 (file)
@@ -99,7 +99,7 @@ CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index a7eb54bf3089916359b3f178a3e26fc5980df1be..2f075e0b26244ea9b3a136377b11580f9999f20b 100644 (file)
@@ -81,7 +81,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index b90d3792ed5209a46b2cd4342398d18e28bf531f..ab38a82597b2868ebe129b416f632554c24cbcac 100644 (file)
@@ -84,7 +84,7 @@ CONFIG_I2C=m
 CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_GPIO=m
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 005362537a7b4c3f4d70dc46f864bd78feafcbf3..5c802d6bbbc00f54d86306378e983af9c8c9cc23 100644 (file)
@@ -94,7 +94,7 @@ CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_BLACKFIN_TWI=m
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 580bf4296a1456ef7869a8884ebd49e40c1b1351..972aa6263ad0a9e7163c7b3b191afceef4282d70 100644 (file)
@@ -101,7 +101,7 @@ CONFIG_I2C=m
 CONFIG_I2C_BLACKFIN_TWI=m
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 0e6d841b5d0147d21344e2fe4ce8dad446df05ea..7a1e3bf2b04f97c78f1e984a04c74feaeb6c0e75 100644 (file)
@@ -113,7 +113,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 77a27e31d6d1d93dc3fd5b499b4f136dd787b0a9..0fdc4ecaa5317a4de2f1850251c3d618db03a96e 100644 (file)
@@ -85,7 +85,7 @@ CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_PCA_PLATFORM=y
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
index f5ed34e12e0ca9928e3b7b1a9bcbd339a076e6bd..78adbbf3982619b1309eb9bff3571593399d4317 100644 (file)
@@ -84,7 +84,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index d7ff2aee3fbc62f73f855875611728ebbf3295f6..d3cd0f561c842cb6f3307580c40a1c63eb5ecdce 100644 (file)
@@ -86,7 +86,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 85014319672c1fb04bae3de7dc7b9a8c7efbb101..7b982d0502adddfb1f8142f9550944985b5725c8 100644 (file)
@@ -80,7 +80,7 @@ CONFIG_I2C=m
 CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_GPIO=m
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_SPI_SPIDEV=m
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
index dbf750cd2db85bf3c3713bde6f36d61108cb8d73..c280a50e79435a8a590631e915008c0036146324 100644 (file)
@@ -88,7 +88,7 @@ CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_BLACKFIN_TWI=m
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_WATCHDOG=y
index 07ffbdae34eeda1e3f3b1c5be68c30d67ddfdf20..c940a1e3ab3685fa85f6db0fd132a5d928d484f8 100644 (file)
@@ -57,7 +57,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
index 707cbf8a25903b008b306616405255f5100c8f26..2e47df77490f29c3f42add408b847ada13da4370 100644 (file)
@@ -78,7 +78,7 @@ CONFIG_SERIAL_BFIN_UART1=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_USB_GADGET=m
index 4596935eadacc3e1f420a68188ea74cd1f17e8f3..6da629ffc2f10954ffdca933bacd690a08819fef 100644 (file)
@@ -72,7 +72,7 @@ CONFIG_SERIAL_BFIN_UART1=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_USB_GADGET=y
index 9f1d08401fcaacdf6d53f661da44e2cee641b66e..349922be01f35c489deacff6ac6cb9b22cc3259a 100644 (file)
@@ -89,7 +89,7 @@ CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_BFIN_WDT=y
index 6c7b21585a43d33032b43ee9421186245122d5df..0456deaa2d6fbc94b62bb7e6a5feb4b815e9a07f 100644 (file)
@@ -78,7 +78,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_USB_GADGET=m
index b192acfae38609d51e8834f19037dca3168d4de0..89162d0fff9ea6325be99d8a45a21f0d8c45c224 100644 (file)
@@ -78,7 +78,7 @@ CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
index 06e9f497faed2f0a7f68ec61cd6c8ca74f9527e5..a26436bf50fff23db967e47b2e2ba3c5c765d5df 100644 (file)
@@ -68,7 +68,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_WATCHDOG=y
 CONFIG_SOUND=m
index 5e797cf72043f757be28f019a62a1ed669688e9f..647991514ac9b8fc98065b1a6878b1b7c7e72908 100644 (file)
@@ -70,7 +70,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_HW_RANDOM=y
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_USB=y
index a566a2fe6b9b7478176629d37a9786dc5b2547ab..8fd9b446d6583f595e2b44ec57a7c6f771581c9f 100644 (file)
@@ -84,7 +84,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index 12e66cd7cdaad79907e6187ad27cd46fed62e9ab..0520c160230de103d8b85b907c48f64abab4e2b3 100644 (file)
@@ -71,7 +71,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_HWMON=m
 CONFIG_WATCHDOG=y
 CONFIG_BFIN_WDT=y
index d496ae9a39b0c9f1f2c0339e124fc52f4721b2d5..e4ed865b885e3a0279f53250afe78f17dcfefd35 100644 (file)
@@ -92,7 +92,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index 65f642167a50351bf95fa68e4481465ebf76efc2..c1f45f15295cdf5c9fbf47c1f945d10cfe29f15c 100644 (file)
@@ -70,7 +70,7 @@ CONFIG_SERIAL_BFIN_UART1=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_BFIN_WDT=y
index ecacdf34768b593b0e43e91c2314927cde723e83..68bcc3d119b678fac2a9f579bb5f4cce7127769e 100644 (file)
@@ -51,9 +51,6 @@ struct bfin_serial_port {
 #elif ANOMALY_05000363
        unsigned int anomaly_threshold;
 #endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-       int scts;
-#endif
 #if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
        defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
        int cts_pin;
index 05043786da2199cbb302563a0c1e3e62295852c2..e349631c8299fa87f7bfca5fca507d0520718de8 100644 (file)
@@ -14,6 +14,9 @@ struct blackfin_cpudata {
        struct cpu cpu;
        unsigned int imemctl;
        unsigned int dmemctl;
+#ifdef CONFIG_SMP
+       struct task_struct *idle;
+#endif
 };
 
 DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
index af6c0aa79bae9435ea5dff4f92f2c945051352aa..dc3d144b4bb5930a396f73d9bce48f67ca1c6365 100644 (file)
@@ -37,7 +37,7 @@ extern unsigned long dcache_invld_count[NR_CPUS];
 #endif
 
 void smp_icache_flush_range_others(unsigned long start,
-                                  unsigned long end);
+                                       unsigned long end);
 #ifdef CONFIG_HOTPLUG_CPU
 void coreb_die(void);
 void cpu_die(void);
@@ -46,4 +46,7 @@ int __cpu_disable(void);
 int __cpu_die(unsigned int cpu);
 #endif
 
+void smp_timer_broadcast(const struct cpumask *mask);
+
+
 #endif /* !__ASM_BLACKFIN_SMP_H */
index dfa2525a442d76318044acc65f51bb07b683b863..d6102c86d037601adcb96d6745d5dc8eadfcf624 100644 (file)
@@ -828,10 +828,18 @@ static inline int __init get_mem_size(void)
        u32 ddrctl = bfin_read_EBIU_DDRCTL1();
        int ret = 0;
        switch (ddrctl & 0xc0000) {
-               case DEVSZ_64:  ret = 64 / 8;
-               case DEVSZ_128: ret = 128 / 8;
-               case DEVSZ_256: ret = 256 / 8;
-               case DEVSZ_512: ret = 512 / 8;
+       case DEVSZ_64:
+               ret = 64 / 8;
+               break;
+       case DEVSZ_128:
+               ret = 128 / 8;
+               break;
+       case DEVSZ_256:
+               ret = 256 / 8;
+               break;
+       case DEVSZ_512:
+               ret = 512 / 8;
+               break;
        }
        switch (ddrctl & 0x30000) {
                case DEVWD_4:  ret *= 2;
index 1bcf3a3c57d83db8b17875a65a5bb9e5b3f7a331..d98f2d69b0c4a3d29dd812c1112a192e66721940 100644 (file)
@@ -219,7 +219,7 @@ static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
 
 #if defined(CONFIG_TICKSOURCE_CORETMR)
 /* per-cpu local core timer */
-static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
 
 static int bfin_coretmr_set_next_event(unsigned long cycles,
                                struct clock_event_device *evt)
@@ -281,6 +281,7 @@ void bfin_coretmr_init(void)
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
+
 irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
 {
        int cpu = smp_processor_id();
@@ -306,6 +307,11 @@ void bfin_coretmr_clockevent_init(void)
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
 
+#ifdef CONFIG_SMP
+       evt->broadcast = smp_timer_broadcast;
+#endif
+
+
        evt->name = "bfin_core_timer";
        evt->rating = 350;
        evt->irq = -1;
index d1c0c0cff3efdb2f68b91edb6a6ad892f10d0a33..a2d96d31bbf16a0764b066d7d08a2201374c5fd9 100644 (file)
@@ -61,7 +61,7 @@ static struct physmap_flash_data ezbrd_flash_data = {
 
 static struct resource ezbrd_flash_resource = {
        .start = 0x20000000,
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        .end   = 0x202fffff,
 #else
        .end   = 0x203fffff,
@@ -122,6 +122,8 @@ static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
 #if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
        .phy_mask = 0xfff7, /* Only probe the port phy connect to the on chip MAC */
 #endif
+       .vlan1_mask = 1,
+       .vlan2_mask = 2,
 };
 
 static struct platform_device bfin_mii_bus = {
@@ -292,7 +294,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 };
 
 /* SPI controller data */
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = 6,
@@ -715,7 +717,7 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
        &bfin_spi1_device,
 #endif
@@ -777,7 +779,7 @@ static int __init ezbrd_init(void)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
        /* setup BF518-EZBRD GPIO pin PG11 to AMS2, PG15 to AMS3. */
        peripheral_request(P_AMS2, "ParaFlash");
-#if !defined(CONFIG_SPI_BFIN) && !defined(CONFIG_SPI_BFIN_MODULE)
+#if !defined(CONFIG_SPI_BFIN5XX) && !defined(CONFIG_SPI_BFIN5XX_MODULE)
        peripheral_request(P_AMS3, "ParaFlash");
 #endif
        return 0;
index 5470bf89e52e1147dcfcec19542dd18d2df7716d..f271310f739d4107241245218aeebaad21dd4308 100644 (file)
@@ -228,7 +228,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 };
 
 /* SPI controller data */
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = 6,
@@ -635,7 +635,7 @@ static struct platform_device *tcm_devices[] __initdata = {
        &bfin_mac_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
        &bfin_spi1_device,
 #endif
index 5bc6938157ad831638ca4661cb61b68c18339e36..c8d5d2b7c732fd78f2a678d2fd09315d3b1968f7 100644 (file)
@@ -334,7 +334,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -744,7 +744,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &bfin_mac_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index cd289698b4dd73575682260b10ba615bf35bb71e..7330607856e96061e9badb1ea24b712b1d1d14da 100644 (file)
@@ -444,7 +444,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = 8,
@@ -893,7 +893,7 @@ static struct platform_device *cmbf527_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index 9f792eafd1ccf604c7b7177d3b5766e691b0c4e2..db3ecfce8306c08fb3b660be5ff509a4b2fdfc4b 100644 (file)
@@ -371,7 +371,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = 8,
@@ -776,7 +776,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &bfin_mac_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index 3ecafff5d2ef753416692b623324e800e0925437..dfdd8e6bac7296d02f09ff6a0b6f19ea4f42caf2 100644 (file)
@@ -664,7 +664,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = 8,
@@ -1189,7 +1189,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index 3a92c4318d2deaa392b2cef5693b4dd46f67170a..360e97fc529363d53193c34decc623d27b31a34e 100644 (file)
@@ -448,7 +448,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = EXP_GPIO_SPISEL_BASE + 8 + MAX_CTRL_CS,
@@ -831,7 +831,7 @@ static struct platform_device *tll6527m_devices[] __initdata = {
        &bfin_mac_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index 47cadd316e764484ac9e728d84ccfead6ee5b53f..6cb7b3ed9b3de48d276324ee81d9277d7102a72e 100644 (file)
@@ -125,7 +125,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -398,7 +398,7 @@ static struct platform_device *h8606_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -428,7 +428,7 @@ static int __init H8606_init(void)
        printk(KERN_INFO "HV Sistemas H8606 board support by http://www.hvsistemas.com\n");
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(h8606_devices, ARRAY_SIZE(h8606_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
        return 0;
index 18817d57c7a15b5cf0e36bc806ef2752e91746fe..de44a3765e595ea4aeb58ce922d3248a299cacc6 100644 (file)
@@ -146,7 +146,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -422,7 +422,7 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index 2c8f30ef6a7b2ab4b749c337814010cd9dc2fe10..fe47e048c4e673c7a055b02304270e199c2ec7a5 100644 (file)
@@ -29,7 +29,7 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF533";
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
@@ -536,7 +536,7 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -549,7 +549,7 @@ static int __init cm_bf533_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf533_devices, ARRAY_SIZE(cm_bf533_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
        return 0;
index 144556e14499e6fc0a7141b8d368cd29650b5b15..07811c209b9d38f020180d44045ef497a2f4095c 100644 (file)
@@ -245,7 +245,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -484,7 +484,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
        &smc91x_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index b597d4e50d58e74e7c29e4fd2b37658bbd12ec49..e303dae4e2d96abb8c8f0a86a0f66670ab4c6b5f 100644 (file)
@@ -104,7 +104,7 @@ static struct platform_device dm9000_device2 = {
 #endif
 
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -270,7 +270,7 @@ static struct platform_device *ip0x_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &spi_bfin_master_device,
 #endif
 
index 2afd02e14bd1f317b053f0296b35161d4b71d4d3..ce88a7165b62a99dfbea8adcfbe59bc356760209 100644 (file)
@@ -219,9 +219,10 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \
+       defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
        {
-               .modalias = "ad183x",
+               .modalias = "ad1836",
                .max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
                .bus_num = 0,
                .chip_select = 4,
@@ -251,7 +252,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -471,7 +472,7 @@ static struct i2c_gpio_platform_data i2c_gpio_data = {
        .scl_pin                = GPIO_PF3,
        .sda_is_open_drain      = 0,
        .scl_is_open_drain      = 0,
-       .udelay                 = 40,
+       .udelay                 = 10,
 };
 
 static struct platform_device i2c_gpio_device = {
@@ -540,27 +541,150 @@ static struct platform_device bfin_dpmc = {
        },
 };
 
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
+       defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) \
+       || defined(CONFIG_SND_BF5XX_AC97) || \
+       defined(CONFIG_SND_BF5XX_AC97_MODULE)
+
+#include <asm/bfin_sport.h>
+
+#define SPORT_REQ(x) \
+       [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
+               P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
+
+static const u16 bfin_snd_pin[][7] = {
+       SPORT_REQ(0),
+       SPORT_REQ(1),
+};
+
+static struct bfin_snd_platform_data bfin_snd_data[] = {
+       {
+               .pin_req = &bfin_snd_pin[0][0],
+       },
+       {
+               .pin_req = &bfin_snd_pin[1][0],
+       },
+};
+
+#define BFIN_SND_RES(x) \
+       [x] = { \
+               { \
+                       .start = SPORT##x##_TCR1, \
+                       .end = SPORT##x##_TCR1, \
+                       .flags = IORESOURCE_MEM \
+               }, \
+               { \
+                       .start = CH_SPORT##x##_RX, \
+                       .end = CH_SPORT##x##_RX, \
+                       .flags = IORESOURCE_DMA, \
+               }, \
+               { \
+                       .start = CH_SPORT##x##_TX, \
+                       .end = CH_SPORT##x##_TX, \
+                       .flags = IORESOURCE_DMA, \
+               }, \
+               { \
+                       .start = IRQ_SPORT##x##_ERROR, \
+                       .end = IRQ_SPORT##x##_ERROR, \
+                       .flags = IORESOURCE_IRQ, \
+               } \
+       }
+
+static struct resource bfin_snd_resources[][4] = {
+       BFIN_SND_RES(0),
+       BFIN_SND_RES(1),
+};
+#endif
+
 #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+       .name = "bfin-i2s-pcm-audio",
+       .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+       .name = "bfin-tdm-pcm-audio",
+       .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+       .name = "bfin-ac97-pcm-audio",
+       .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
+       defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+static const unsigned ad73311_gpio[] = {
+       GPIO_PF4,
+};
+
+static struct platform_device bfin_ad73311_machine = {
+       .name = "bfin-snd-ad73311",
+       .id = 1,
+       .dev = {
+               .platform_data = (void *)ad73311_gpio,
+       },
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+static struct platform_device bfin_ad73311_codec_device = {
+       .name = "ad73311",
+       .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+static struct platform_device bfin_ad74111_codec_device = {
+       .name = "ad74111",
+       .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
+       defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
 static struct platform_device bfin_i2s = {
        .name = "bfin-i2s",
        .id = CONFIG_SND_BF5XX_SPORT_NUM,
-       /* TODO: add platform data here */
+       .num_resources =
+               ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+       .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+       .dev = {
+               .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+       },
 };
 #endif
 
-#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \
+       defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
 static struct platform_device bfin_tdm = {
        .name = "bfin-tdm",
        .id = CONFIG_SND_BF5XX_SPORT_NUM,
-       /* TODO: add platform data here */
+       .num_resources =
+               ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+       .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+       .dev = {
+               .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+       },
 };
 #endif
 
-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
+       defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
 static struct platform_device bfin_ac97 = {
        .name = "bfin-ac97",
        .id = CONFIG_SND_BF5XX_SPORT_NUM,
-       /* TODO: add platform data here */
+       .num_resources =
+               ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+       .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+       .dev = {
+               .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+       },
 };
 #endif
 
@@ -580,7 +704,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -596,7 +720,8 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
+       defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
        &bfin_sport0_uart_device,
 #endif
@@ -618,14 +743,42 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 
 #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
-       &bfin_i2s,
+       &bfin_i2s_pcm,
 #endif
 
 #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
-       &bfin_tdm,
+       &bfin_tdm_pcm,
 #endif
 
 #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+       &bfin_ac97_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
+       defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+       &bfin_ad73311_machine,
+#endif
+
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+       &bfin_ad73311_codec_device,
+#endif
+
+#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+       &bfin_ad74111_codec_device,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
+       defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
+       &bfin_i2s,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \
+       defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
+       &bfin_tdm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
+       defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
        &bfin_ac97,
 #endif
 };
index 604a430038e153c73360987a5e763ee7a4af38dc..0d4a2f61a973a94399a93aeac18ff86935810b1f 100644 (file)
@@ -31,7 +31,7 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF537E";
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -735,7 +735,7 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -770,7 +770,7 @@ static int __init cm_bf537e_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf537e_devices, ARRAY_SIZE(cm_bf537e_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
 
index d916b46a44fe9334bf49fb51b940aa2889c3245a..f5536982706c702137fbd4cda636affac68b8eec 100644 (file)
@@ -32,7 +32,7 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF537U";
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -700,7 +700,7 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -747,7 +747,7 @@ static int __init cm_bf537u_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf537u_devices, ARRAY_SIZE(cm_bf537u_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
 
index 5f307228be63b7b78108c9fcc19136b01e636bf1..11dadeb33d79af0dd63118bbce90f794b89d3a48 100644 (file)
@@ -125,7 +125,7 @@ static struct platform_device asmb_flash_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 
 #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
 
@@ -370,7 +370,7 @@ static struct platform_device *dnp5370_devices[] __initdata = {
        &bfin_mac_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &spi_bfin_master_device,
 #endif
 
index 3901dd093b9044fad71e1a7da5480273677db7cf..d2d71282618fb9b512371c0626d7ae5937eaf28f 100644 (file)
@@ -121,7 +121,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) \
@@ -496,7 +496,7 @@ static struct platform_device *minotaur_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -537,7 +537,7 @@ static int __init minotaur_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(minotaur_devices, ARRAY_SIZE(minotaur_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info,
                                ARRAY_SIZE(bfin_spi_board_info));
 #endif
index aebd31c845f008995c90424a5b053767a3058c78..6fd84709fc68c08c786018f2b4c63590c639d232 100644 (file)
@@ -154,7 +154,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) \
@@ -477,7 +477,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -508,7 +508,7 @@ static int __init pnav_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info,
                                ARRAY_SIZE(bfin_spi_board_info));
 #endif
index 7fbb0bbf86762c3fd40336cb765c1f746b90cab6..2221173e489e5294f9c788ea33db0a9e9389e6dd 100644 (file)
@@ -1420,7 +1420,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI controller data */
 static struct bfin5xx_spi_master bfin_spi0_info = {
        .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -1462,7 +1462,7 @@ static struct platform_device bfin_spi0_device = {
 
 /* SPORT SPI controller data */
 static struct bfin5xx_spi_master bfin_sport_spi0_info = {
-       .num_chipselect = 1, /* master only supports one device */
+       .num_chipselect = MAX_BLACKFIN_GPIOS,
        .enable_dma = 0,  /* master don't support DMA */
        .pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
                P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
@@ -1492,7 +1492,7 @@ static struct platform_device bfin_sport_spi0_device = {
 };
 
 static struct bfin5xx_spi_master bfin_sport_spi1_info = {
-       .num_chipselect = 1, /* master only supports one device */
+       .num_chipselect = MAX_BLACKFIN_GPIOS,
        .enable_dma = 0,  /* master don't support DMA */
        .pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI,
                P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0},
@@ -1558,6 +1558,71 @@ static struct platform_device bfin_lq035q1_device = {
 };
 #endif
 
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+       P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+       P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+       P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+       0,
+};
+
+static const struct ppi_info ppi_info = {
+       .type = PPI_TYPE_PPI,
+       .dma_ch = CH_PPI,
+       .irq_err = IRQ_PPI_ERROR,
+       .base = (void __iomem *)PPI_CONTROL,
+       .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_VS6624) \
+       || defined(CONFIG_VIDEO_VS6624_MODULE)
+static struct v4l2_input vs6624_inputs[] = {
+       {
+               .index = 0,
+               .name = "Camera",
+               .type = V4L2_INPUT_TYPE_CAMERA,
+               .std = V4L2_STD_UNKNOWN,
+       },
+};
+
+static struct bcap_route vs6624_routes[] = {
+       {
+               .input = 0,
+               .output = 0,
+       },
+};
+
+static const unsigned vs6624_ce_pin = GPIO_PF10;
+
+static struct bfin_capture_config bfin_capture_data = {
+       .card_name = "BF537",
+       .inputs = vs6624_inputs,
+       .num_inputs = ARRAY_SIZE(vs6624_inputs),
+       .routes = vs6624_routes,
+       .i2c_adapter_id = 0,
+       .board_info = {
+               .type = "vs6624",
+               .addr = 0x10,
+               .platform_data = (void *)&vs6624_ce_pin,
+       },
+       .ppi_info = &ppi_info,
+       .ppi_control = (PACK_EN | DLEN_8 | XFR_TYPE | 0x0020),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+       .name = "bfin_capture",
+       .dev = {
+               .platform_data = &bfin_capture_data,
+       },
+};
+#endif
+
 #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
@@ -2716,7 +2781,7 @@ static struct platform_device *stamp_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -2733,6 +2798,11 @@ static struct platform_device *stamp_devices[] __initdata = {
        &bfin_lq035q1_device,
 #endif
 
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+       &bfin_capture_device,
+#endif
+
 #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_UART0
        &bfin_uart0_device,
index 6917ce2fa55ef4f61568263462e05c1cd733aa18..988517671a5dbd91ac12b3fd028dc156eb0a25af 100644 (file)
@@ -32,7 +32,7 @@
  */
 const char bfin_board_name[] = "Bluetechnix TCM BF537";
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -702,7 +702,7 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -737,7 +737,7 @@ static int __init tcm_bf537_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
 
index 8356eb599f1937cb37b552c36a91c70bf0c0b4b9..1633a6f306c0e1390fedf5dd12dd805c5ac9c704 100644 (file)
@@ -490,7 +490,7 @@ static struct platform_device smc91x_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 #if defined(CONFIG_MTD_M25P80) \
        || defined(CONFIG_MTD_M25P80_MODULE)
@@ -874,7 +874,7 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bf538_spi_master0,
        &bf538_spi_master1,
        &bf538_spi_master2,
@@ -938,7 +938,7 @@ static int __init ezkit_init(void)
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices));
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bf538_spi_board_info,
                        ARRAY_SIZE(bf538_spi_board_info));
 #endif
index 0350eacec21b96785f2eda1cc933cca52bbde572..68af594db48e638c8ad29bc1a504aaa560382d5b 100644 (file)
@@ -854,7 +854,7 @@ static struct platform_device para_flash_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 #if defined(CONFIG_MTD_M25P80) \
        || defined(CONFIG_MTD_M25P80_MODULE)
@@ -1175,7 +1175,7 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
        &bf54x_sdh_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bf54x_spi_master0,
        &bf54x_spi_master1,
 #endif
@@ -1210,7 +1210,7 @@ static int __init cm_bf548_init(void)
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf548_devices, ARRAY_SIZE(cm_bf548_devices));
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bf54x_spi_board_info,
                        ARRAY_SIZE(bf54x_spi_board_info));
 #endif
index bb868ac0fe2dbdf5063034be857f5ece7a9c82a4..3ea45f8bd61c162a1f723a08f068f2972184a0c5 100644 (file)
@@ -1110,7 +1110,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
        },
 #endif
 };
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -1183,6 +1183,71 @@ static struct platform_device bf54x_spi_master1 = {
 };
 #endif  /* spi master and devices */
 
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+       P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+       P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+       P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+       0,
+};
+
+static const struct ppi_info ppi_info = {
+       .type = PPI_TYPE_EPPI,
+       .dma_ch = CH_EPPI1,
+       .irq_err = IRQ_EPPI1_ERROR,
+       .base = (void __iomem *)EPPI1_STATUS,
+       .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_VS6624) \
+       || defined(CONFIG_VIDEO_VS6624_MODULE)
+static struct v4l2_input vs6624_inputs[] = {
+       {
+               .index = 0,
+               .name = "Camera",
+               .type = V4L2_INPUT_TYPE_CAMERA,
+               .std = V4L2_STD_UNKNOWN,
+       },
+};
+
+static struct bcap_route vs6624_routes[] = {
+       {
+               .input = 0,
+               .output = 0,
+       },
+};
+
+static const unsigned vs6624_ce_pin = GPIO_PG6;
+
+static struct bfin_capture_config bfin_capture_data = {
+       .card_name = "BF548",
+       .inputs = vs6624_inputs,
+       .num_inputs = ARRAY_SIZE(vs6624_inputs),
+       .routes = vs6624_routes,
+       .i2c_adapter_id = 0,
+       .board_info = {
+               .type = "vs6624",
+               .addr = 0x10,
+               .platform_data = (void *)&vs6624_ce_pin,
+       },
+       .ppi_info = &ppi_info,
+       .ppi_control = (POLC | PACKEN | DLEN_8 | XFR_TYPE | 0x20),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+       .name = "bfin_capture",
+       .dev = {
+               .platform_data = &bfin_capture_data,
+       },
+};
+#endif
+
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
        [0] = {
@@ -1502,10 +1567,14 @@ static struct platform_device *ezkit_devices[] __initdata = {
        &bf54x_sdh_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bf54x_spi_master0,
        &bf54x_spi_master1,
 #endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+       &bfin_capture_device,
+#endif
 
 #if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
        &bf54x_kpad_device,
index b1b7339b6ba776a321d0614c091fe035508d63b7..f6ffd6f054c398201dfea4e21aaf4c3b6d044719 100644 (file)
@@ -372,7 +372,7 @@ static struct bfin5xx_spi_chip data_flash_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -475,7 +475,7 @@ static struct platform_device bfin_dpmc = {
 static struct platform_device *acvilon_devices[] __initdata = {
        &bfin_dpmc,
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
index c017cf07ed4e2960f032f9d8b0c81a0a60e657de..d81450f635df13f2777b9b7587fc9f5db6260b54 100644 (file)
@@ -29,7 +29,7 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF561";
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
 
 #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -488,7 +488,7 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
        &net2272_bfin_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -523,7 +523,7 @@ static int __init cm_bf561_init(void)
 {
        printk(KERN_INFO "%s(): registering device resources\n", __func__);
        platform_add_devices(cm_bf561_devices, ARRAY_SIZE(cm_bf561_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
 
index 27f22ed381d99767b0bda919964fed021f7d3cb5..838978808a15a4414410301b2ba464604eae5f36 100644 (file)
@@ -291,7 +291,7 @@ static struct platform_device ezkit_flash_device = {
 };
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
        [0] = {
@@ -383,7 +383,7 @@ static struct i2c_gpio_platform_data i2c_gpio_data = {
        .scl_pin                = GPIO_PF0,
        .sda_is_open_drain      = 0,
        .scl_is_open_drain      = 0,
-       .udelay                 = 40,
+       .udelay                 = 10,
 };
 
 static struct platform_device i2c_gpio_device = {
@@ -422,6 +422,96 @@ static struct platform_device bfin_dpmc = {
        },
 };
 
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+       P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+       P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+       P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+       0,
+};
+
+static const struct ppi_info ppi_info = {
+       .type = PPI_TYPE_PPI,
+       .dma_ch = CH_PPI0,
+       .irq_err = IRQ_PPI1_ERROR,
+       .base = (void __iomem *)PPI0_CONTROL,
+       .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_ADV7183) \
+       || defined(CONFIG_VIDEO_ADV7183_MODULE)
+#include <media/adv7183.h>
+static struct v4l2_input adv7183_inputs[] = {
+       {
+               .index = 0,
+               .name = "Composite",
+               .type = V4L2_INPUT_TYPE_CAMERA,
+               .std = V4L2_STD_ALL,
+       },
+       {
+               .index = 1,
+               .name = "S-Video",
+               .type = V4L2_INPUT_TYPE_CAMERA,
+               .std = V4L2_STD_ALL,
+       },
+       {
+               .index = 2,
+               .name = "Component",
+               .type = V4L2_INPUT_TYPE_CAMERA,
+               .std = V4L2_STD_ALL,
+       },
+};
+
+static struct bcap_route adv7183_routes[] = {
+       {
+               .input = ADV7183_COMPOSITE4,
+               .output = ADV7183_8BIT_OUT,
+       },
+       {
+               .input = ADV7183_SVIDEO0,
+               .output = ADV7183_8BIT_OUT,
+       },
+       {
+               .input = ADV7183_COMPONENT0,
+               .output = ADV7183_8BIT_OUT,
+       },
+};
+
+
+static const unsigned adv7183_gpio[] = {
+       GPIO_PF13, /* reset pin */
+       GPIO_PF2,  /* output enable pin */
+};
+
+static struct bfin_capture_config bfin_capture_data = {
+       .card_name = "BF561",
+       .inputs = adv7183_inputs,
+       .num_inputs = ARRAY_SIZE(adv7183_inputs),
+       .routes = adv7183_routes,
+       .i2c_adapter_id = 0,
+       .board_info = {
+               .type = "adv7183",
+               .addr = 0x20,
+               .platform_data = (void *)adv7183_gpio,
+       },
+       .ppi_info = &ppi_info,
+       .ppi_control = (PACK_EN | DLEN_8 | DMA32 | FLD_SEL),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+       .name = "bfin_capture",
+       .dev = {
+               .platform_data = &bfin_capture_data,
+       },
+};
+#endif
+
 #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
 static struct platform_device bfin_i2s = {
        .name = "bfin-i2s",
@@ -462,7 +552,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
        &bfin_isp1760_device,
 #endif
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
        &bfin_spi0_device,
 #endif
 
@@ -494,6 +584,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
        &ezkit_flash_device,
 #endif
 
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+       || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+       &bfin_capture_device,
+#endif
+
 #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
        &bfin_i2s,
 #endif
index 7977db2f1c1268d15d0f0b7092401f32abd41db3..00bdacee9cc2ec96607a1a4a15d0ddd29196391b 100644 (file)
@@ -16,6 +16,7 @@
 #include <mach/irq.h>
 
 #define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
+#define SUPPLE_1_WAKEUP ((IRQ_SUPPLE_1 - (IRQ_CORETMR + 1)) % 32)
 
 static inline void
 bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
@@ -42,7 +43,8 @@ bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
 static inline void
 bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
 {
-       bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
+       bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP) |
+                       IWR_ENABLE(SUPPLE_1_WAKEUP), 0, iwr0, iwr1, iwr2);
 }
 
 #endif
index db22401e760589f59834302423bbca6cc9466855..ab1c617b9cfc9e646b4366e8fa34bd8c058defa1 100644 (file)
@@ -84,7 +84,7 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
 
        if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
                /* CoreB already running, sending ipi to wakeup it */
-               platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+               smp_send_reschedule(cpu);
        } else {
                /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
                bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
@@ -114,7 +114,8 @@ void __init platform_request_ipi(int irq, void *handler)
        int ret;
        const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
 
-       ret = request_irq(irq, handler, IRQF_PERCPU, name, handler);
+       ret = request_irq(irq, handler, IRQF_PERCPU | IRQF_NO_SUSPEND |
+                       IRQF_FORCE_RESUME, name, handler);
        if (ret)
                panic("Cannot request %s for IPI service", name);
 }
index 0784a52389c8e4ebe398d1740cd948fbe36e1391..ac8f8a43158c5b85b5524d15bd3a74b8d5de0507 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/cache.h>
+#include <linux/clockchips.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
@@ -47,9 +48,10 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_RESCHEDULE   0
-#define BFIN_IPI_CALL_FUNC    1
-#define BFIN_IPI_CPU_STOP     2
+#define BFIN_IPI_TIMER       0
+#define BFIN_IPI_RESCHEDULE   1
+#define BFIN_IPI_CALL_FUNC    2
+#define BFIN_IPI_CPU_STOP     3
 
 struct blackfin_flush_data {
        unsigned long start;
@@ -160,6 +162,14 @@ static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
+DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
+void ipi_timer(void)
+{
+       int cpu = smp_processor_id();
+       struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+       evt->event_handler(evt);
+}
+
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
        struct ipi_message *msg;
@@ -176,18 +186,17 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
        while (msg_queue->count) {
                msg = &msg_queue->ipi_message[msg_queue->head];
                switch (msg->type) {
+               case BFIN_IPI_TIMER:
+                       ipi_timer();
+                       break;
                case BFIN_IPI_RESCHEDULE:
                        scheduler_ipi();
                        break;
                case BFIN_IPI_CALL_FUNC:
-                       spin_unlock_irqrestore(&msg_queue->lock, flags);
                        ipi_call_function(cpu, msg);
-                       spin_lock_irqsave(&msg_queue->lock, flags);
                        break;
                case BFIN_IPI_CPU_STOP:
-                       spin_unlock_irqrestore(&msg_queue->lock, flags);
                        ipi_cpu_stop(cpu);
-                       spin_lock_irqsave(&msg_queue->lock, flags);
                        break;
                default:
                        printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
@@ -297,8 +306,6 @@ void smp_send_reschedule(int cpu)
 {
        cpumask_t callmap;
        /* simply trigger an ipi */
-       if (cpu_is_offline(cpu))
-               return;
 
        cpumask_clear(&callmap);
        cpumask_set_cpu(cpu, &callmap);
@@ -308,6 +315,16 @@ void smp_send_reschedule(int cpu)
        return;
 }
 
+void smp_send_msg(const struct cpumask *mask, unsigned long type)
+{
+       smp_send_message(*mask, type, NULL, NULL, 0);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+       smp_send_msg(mask, BFIN_IPI_TIMER);
+}
+
 void smp_send_stop(void)
 {
        cpumask_t callmap;
@@ -326,17 +343,24 @@ void smp_send_stop(void)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        int ret;
-       static struct task_struct *idle;
+       struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
+       struct task_struct *idle = ci->idle;
 
-       if (idle)
+       if (idle) {
                free_task(idle);
-
-       idle = fork_idle(cpu);
-       if (IS_ERR(idle)) {
-               printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-               return PTR_ERR(idle);
+               idle = NULL;
        }
 
+       if (!idle) {
+               idle = fork_idle(cpu);
+               if (IS_ERR(idle)) {
+                       printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+                       return PTR_ERR(idle);
+               }
+               ci->idle = idle;
+       } else {
+               init_idle(idle, cpu);
+       }
        secondary_stack = task_stack_page(idle) + THREAD_SIZE;
 
        ret = platform_boot_secondary(cpu, idle);
@@ -411,6 +435,7 @@ void __cpuinit secondary_start_kernel(void)
 
        bfin_setup_caches(cpu);
 
+       notify_cpu_starting(cpu);
        /*
         * Calibrate loops per jiffy value.
         * IRQs need to be enabled here - D-cache can be invalidated
@@ -453,8 +478,10 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
        smp_flush_data.start = start;
        smp_flush_data.end = end;
 
-       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
+       preempt_disable();
+       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
                printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
new file mode 100644 (file)
index 0000000..26e67f0
--- /dev/null
@@ -0,0 +1,174 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+config TMS320C6X
+       def_bool y
+       select CLKDEV_LOOKUP
+       select GENERIC_IRQ_SHOW
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_DMA_API_DEBUG
+       select HAVE_GENERIC_HARDIRQS
+       select HAVE_MEMBLOCK
+       select HAVE_SPARSE_IRQ
+       select OF
+       select OF_EARLY_FLATTREE
+
+config MMU
+       def_bool n
+
+config ZONE_DMA
+       def_bool y
+
+config FPU
+       def_bool n
+
+config HIGHMEM
+       def_bool n
+
+config NUMA
+       def_bool n
+
+config RWSEM_GENERIC_SPINLOCK
+       def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+       def_bool n
+
+config GENERIC_CALIBRATE_DELAY
+       def_bool y
+
+config GENERIC_HWEIGHT
+       def_bool y
+
+config GENERIC_CLOCKEVENTS
+       def_bool y
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+       bool
+
+config GENERIC_BUG
+       def_bool y
+
+config COMMON_CLKDEV
+       def_bool y
+
+config C6X_BIG_KERNEL
+       bool "Build a big kernel"
+       help
+         The C6X function call instruction has a limited range of +/- 2MiB.
+         This is sufficient for most kernels, but some kernel configurations
+         with lots of compiled-in functionality may require a larger range
+         for function calls. Use this option to have the compiler generate
+         function calls with 32-bit range. This will make the kernel both
+         larger and slower.
+
+         If unsure, say N.
+
+source "init/Kconfig"
+
+# Use the generic interrupt handling code in kernel/irq/
+
+source "kernel/Kconfig.freezer"
+
+config CMDLINE_BOOL
+       bool "Default bootloader kernel arguments"
+
+config CMDLINE
+       string "Kernel command line"
+       depends on CMDLINE_BOOL
+       default "console=ttyS0,57600"
+       help
+         On some architectures there is currently no way for the boot loader
+         to pass arguments to the kernel. For these architectures, you should
+         supply some command-line options at build time by entering them
+         here.
+
+config CMDLINE_FORCE
+       bool "Force default kernel command string"
+       depends on CMDLINE_BOOL
+       default n
+       help
+         Set this to have arguments from the default kernel command string
+         override those passed by the boot loader.
+
+config CPU_BIG_ENDIAN
+       bool "Build big-endian kernel"
+       default n
+       help
+         Say Y if you plan on running a kernel in big-endian mode.
+         Note that your board must be properly built and your board
+         port must properly enable any big-endian related features
+         of your chipset/board/processor.
+
+config FORCE_MAX_ZONEORDER
+       int "Maximum zone order"
+       default "13"
+       help
+         The kernel memory allocator divides physically contiguous memory
+         blocks into "zones", where each zone is a power of two number of
+         pages.  This option selects the largest power of two that the kernel
+         keeps in the memory allocator.  If you need to allocate very large
+         blocks of physically contiguous memory, then you may need to
+         increase this value.
+
+         This config option is actually maximum order plus one. For example,
+         a value of 11 means that the largest free memory block is 2^10 pages.
+
+menu "Processor type and features"
+
+source "arch/c6x/platforms/Kconfig"
+
+config TMS320C6X_CACHES_ON
+       bool "L2 cache support"
+       default y
+
+config KERNEL_RAM_BASE_ADDRESS
+       hex "Virtual address of memory base"
+       default 0xe0000000 if SOC_TMS320C6455
+       default 0xe0000000 if SOC_TMS320C6457
+       default 0xe0000000 if SOC_TMS320C6472
+       default 0x80000000
+
+source "mm/Kconfig"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config ACCESS_CHECK
+       bool "Check the user pointer address"
+       default y
+       help
+         Usually the pointer transfer from user space is checked to see if its
+         address is in the kernel space.
+
+         Say N here to disable that check to improve the performance.
+
+endmenu
diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile
new file mode 100644 (file)
index 0000000..1d08dd0
--- /dev/null
@@ -0,0 +1,60 @@
+#
+# linux/arch/c6x/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+cflags-y += -mno-dsbt -msdata=none
+
+cflags-$(CONFIG_C6X_BIG_KERNEL) += -mlong-calls
+
+CFLAGS_MODULE   += -mlong-calls -mno-dsbt -msdata=none
+
+CHECKFLAGS      +=
+
+KBUILD_CFLAGS   += $(cflags-y)
+KBUILD_AFLAGS   += $(cflags-y)
+
+ifdef CONFIG_CPU_BIG_ENDIAN
+KBUILD_CFLAGS   += -mbig-endian
+KBUILD_AFLAGS   += -mbig-endian
+LINKFLAGS       += -mbig-endian
+KBUILD_LDFLAGS  += -mbig-endian
+LDFLAGS += -EB
+endif
+
+head-y          := arch/c6x/kernel/head.o
+core-y          += arch/c6x/kernel/ arch/c6x/mm/ arch/c6x/platforms/
+libs-y          += arch/c6x/lib/
+
+# Default to vmlinux.bin, override when needed
+all: vmlinux.bin
+
+boot := arch/$(ARCH)/boot
+
+# Are we making a dtbImage.<boardname> target? If so, crack out the boardname
+DTB:=$(subst dtbImage.,,$(filter dtbImage.%, $(MAKECMDGOALS)))
+export DTB
+
+ifneq ($(DTB),)
+core-y += $(boot)/
+endif
+
+# With make 3.82 we cannot mix normal and wildcard targets
+
+vmlinux.bin: vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+
+dtbImage.%: vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+
+archclean:
+       $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+  @echo '  vmlinux.bin     - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
+  @echo '  dtbImage.<dt>   - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
+  @echo '                  - stripped elf with fdt blob'
+endef
diff --git a/arch/c6x/boot/Makefile b/arch/c6x/boot/Makefile
new file mode 100644 (file)
index 0000000..ecca820
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Makefile for bootable kernel images
+#
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary
+$(obj)/vmlinux.bin: vmlinux FORCE
+       $(call if_changed,objcopy)
+
+DTC_FLAGS ?= -p 1024
+
+ifneq ($(DTB),)
+obj-y += linked_dtb.o
+endif
+
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+       $(call cmd,dtc)
+
+quiet_cmd_cp = CP      $< $@$2
+       cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
+
+# Generate builtin.dtb from $(DTB).dtb
+$(obj)/builtin.dtb: $(obj)/$(DTB).dtb
+       $(call if_changed,cp)
+
+$(obj)/linked_dtb.o: $(obj)/builtin.dtb
+
+$(obj)/dtbImage.%: vmlinux
+       $(call if_changed,objcopy)
+
+clean-files := $(obj)/*.dtb
diff --git a/arch/c6x/boot/dts/dsk6455.dts b/arch/c6x/boot/dts/dsk6455.dts
new file mode 100644 (file)
index 0000000..2b71f80
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * arch/c6x/boot/dts/dsk6455.dts
+ *
+ * DSK6455 Evaluation Platform For TMS320C6455
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6455.dtsi"
+
+/ {
+       model = "Spectrum Digital DSK6455";
+       compatible = "spectrum-digital,dsk6455";
+
+       chosen {
+               bootargs = "root=/dev/nfs ip=dhcp rw";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0xE0000000 0x08000000>;
+       };
+
+       soc {
+               megamod_pic: interrupt-controller@1800000 {
+                       interrupts = < 12 13 14 15 >;
+               };
+
+               emifa@70000000 {
+                       flash@3,0 {
+                                 #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "cfi-flash";
+                               reg = <0x3 0x0 0x400000>;
+                               bank-width = <1>;
+                               device-width = <1>;
+                               partition@0 {
+                                       reg = <0x0 0x400000>;
+                                       label = "NOR";
+                               };
+                       };
+               };
+
+               timer1: timer@2980000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 69 >;
+               };
+
+               clock-controller@029a0000 {
+                       clock-frequency = <50000000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/evmc6457.dts b/arch/c6x/boot/dts/evmc6457.dts
new file mode 100644 (file)
index 0000000..0301eb9
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * arch/c6x/boot/dts/evmc6457.dts
+ *
+ * EVMC6457 Evaluation Platform For TMS320C6457
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6457.dtsi"
+
+/ {
+       model = "eInfochips EVMC6457";
+       compatible = "einfochips,evmc6457";
+
+       chosen {
+               bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0xE0000000 0x10000000>;
+       };
+
+       soc {
+               megamod_pic: interrupt-controller@1800000 {
+                      interrupts = < 12 13 14 15 >;
+               };
+
+               timer0: timer@2940000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 67 >;
+               };
+
+               clock-controller@29a0000 {
+                       clock-frequency = <60000000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/evmc6472.dts b/arch/c6x/boot/dts/evmc6472.dts
new file mode 100644 (file)
index 0000000..3e207b4
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * arch/c6x/boot/dts/evmc6472.dts
+ *
+ * EVMC6472 Evaluation Platform For TMS320C6472
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6472.dtsi"
+
+/ {
+       model = "eInfochips EVMC6472";
+       compatible = "einfochips,evmc6472";
+
+       chosen {
+               bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0xE0000000 0x10000000>;
+       };
+
+       soc {
+               megamod_pic: interrupt-controller@1800000 {
+                      interrupts = < 12 13 14 15 >;
+               };
+
+               timer0: timer@25e0000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               timer1: timer@25f0000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               timer2: timer@2600000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               timer3: timer@2610000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               timer4: timer@2620000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               timer5: timer@2630000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 16 >;
+               };
+
+               clock-controller@29a0000 {
+                       clock-frequency = <25000000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/evmc6474.dts b/arch/c6x/boot/dts/evmc6474.dts
new file mode 100644 (file)
index 0000000..4dc2912
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * arch/c6x/boot/dts/evmc6474.dts
+ *
+ * EVMC6474 Evaluation Platform For TMS320C6474
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6474.dtsi"
+
+/ {
+       model = "Spectrum Digital EVMC6474";
+       compatible = "spectrum-digital,evmc6474";
+
+       chosen {
+               bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x80000000 0x08000000>;
+       };
+
+       soc {
+               megamod_pic: interrupt-controller@1800000 {
+                      interrupts = < 12 13 14 15 >;
+               };
+
+               timer3: timer@2940000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 39 >;
+               };
+
+               timer4: timer@2950000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 41 >;
+               };
+
+               timer5: timer@2960000 {
+                       interrupt-parent = <&megamod_pic>;
+                       interrupts = < 43 >;
+               };
+
+               clock-controller@29a0000 {
+                       clock-frequency = <50000000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/tms320c6455.dtsi b/arch/c6x/boot/dts/tms320c6455.dtsi
new file mode 100644 (file)
index 0000000..a804ec1
--- /dev/null
@@ -0,0 +1,96 @@
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "ti,c64x+";
+                       reg = <0>;
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               model = "tms320c6455";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               core_pic: interrupt-controller {
+                         interrupt-controller;
+                         #interrupt-cells = <1>;
+                         compatible = "ti,c64x+core-pic";
+               };
+
+               /*
+                * Megamodule interrupt controller
+                */
+               megamod_pic: interrupt-controller@1800000 {
+                      compatible = "ti,c64x+megamod-pic";
+                      interrupt-controller;
+                      #interrupt-cells = <1>;
+                      reg = <0x1800000 0x1000>;
+                      interrupt-parent = <&core_pic>;
+               };
+
+               cache-controller@1840000 {
+                       compatible = "ti,c64x+cache";
+                       reg = <0x01840000 0x8400>;
+               };
+
+               emifa@70000000 {
+                       compatible = "ti,c64x+emifa", "simple-bus";
+                       #address-cells = <2>;
+                       #size-cells = <1>;
+                       reg = <0x70000000 0x100>;
+                       ranges = <0x2 0x0 0xa0000000 0x00000008
+                                 0x3 0x0 0xb0000000 0x00400000
+                                 0x4 0x0 0xc0000000 0x10000000
+                                 0x5 0x0 0xD0000000 0x10000000>;
+
+                       ti,dscr-dev-enable = <13>;
+                       ti,emifa-burst-priority = <255>;
+                       ti,emifa-ce-config = <0x00240120
+                                             0x00240120
+                                             0x00240122
+                                             0x00240122>;
+               };
+
+               timer1: timer@2980000 {
+                       compatible = "ti,c64x+timer64";
+                       reg = <0x2980000 0x40>;
+                       ti,dscr-dev-enable = <4>;
+               };
+
+               clock-controller@029a0000 {
+                       compatible = "ti,c6455-pll", "ti,c64x+pll";
+                       reg = <0x029a0000 0x200>;
+                       ti,c64x+pll-bypass-delay = <1440>;
+                       ti,c64x+pll-reset-delay = <15360>;
+                       ti,c64x+pll-lock-delay = <24000>;
+               };
+
+               device-state-config-regs@2a80000 {
+                       compatible = "ti,c64x+dscr";
+                       reg = <0x02a80000 0x41000>;
+
+                       ti,dscr-devstat = <0>;
+                       ti,dscr-silicon-rev = <8 28 0xf>;
+                       ti,dscr-rmii-resets = <0 0x40020 0x00040000>;
+
+                       ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
+                       ti,dscr-devstate-ctl-regs =
+                                <0 12 0x40008 1 0  0  2
+                                 12 1 0x40008 3 0 30  2
+                                 13 2 0x4002c 1 0xffffffff 0 1>;
+                       ti,dscr-devstate-stat-regs =
+                               <0 10 0x40014 1 0  0  3
+                                10 2 0x40018 1 0  0  3>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/tms320c6457.dtsi b/arch/c6x/boot/dts/tms320c6457.dtsi
new file mode 100644 (file)
index 0000000..35f4070
--- /dev/null
@@ -0,0 +1,68 @@
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "ti,c64x+";
+                       reg = <0>;
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               model = "tms320c6457";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               core_pic: interrupt-controller {
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       compatible = "ti,c64x+core-pic";
+               };
+
+               megamod_pic: interrupt-controller@1800000 {
+                       compatible = "ti,c64x+megamod-pic";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       interrupt-parent = <&core_pic>;
+                       reg = <0x1800000 0x1000>;
+               };
+
+               cache-controller@1840000 {
+                       compatible = "ti,c64x+cache";
+                       reg = <0x01840000 0x8400>;
+               };
+
+               device-state-controller@2880800 {
+                       compatible = "ti,c64x+dscr";
+                       reg = <0x02880800 0x400>;
+
+                       ti,dscr-devstat = <0x20>;
+                       ti,dscr-silicon-rev = <0x18 28 0xf>;
+                       ti,dscr-mac-fuse-regs = <0x114 3 4 5 6
+                                                0x118 0 0 1 2>;
+                       ti,dscr-kick-regs = <0x38 0x83E70B13
+                                            0x3c 0x95A4F1E0>;
+               };
+
+               timer0: timer@2940000 {
+                       compatible = "ti,c64x+timer64";
+                       reg = <0x2940000 0x40>;
+               };
+
+               clock-controller@29a0000 {
+                       compatible = "ti,c6457-pll", "ti,c64x+pll";
+                       reg = <0x029a0000 0x200>;
+                       ti,c64x+pll-bypass-delay = <300>;
+                       ti,c64x+pll-reset-delay = <24000>;
+                       ti,c64x+pll-lock-delay = <50000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/tms320c6472.dtsi b/arch/c6x/boot/dts/tms320c6472.dtsi
new file mode 100644 (file)
index 0000000..b488aae
--- /dev/null
@@ -0,0 +1,134 @@
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       reg = <0>;
+                       model = "ti,c64x+";
+               };
+               cpu@1 {
+                       device_type = "cpu";
+                       reg = <1>;
+                       model = "ti,c64x+";
+               };
+               cpu@2 {
+                       device_type = "cpu";
+                       reg = <2>;
+                       model = "ti,c64x+";
+               };
+               cpu@3 {
+                       device_type = "cpu";
+                       reg = <3>;
+                       model = "ti,c64x+";
+               };
+               cpu@4 {
+                       device_type = "cpu";
+                       reg = <4>;
+                       model = "ti,c64x+";
+               };
+               cpu@5 {
+                       device_type = "cpu";
+                       reg = <5>;
+                       model = "ti,c64x+";
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               model = "tms320c6472";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               core_pic: interrupt-controller {
+                       compatible = "ti,c64x+core-pic";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               megamod_pic: interrupt-controller@1800000 {
+                      compatible = "ti,c64x+megamod-pic";
+                      interrupt-controller;
+                      #interrupt-cells = <1>;
+                      reg = <0x1800000 0x1000>;
+                      interrupt-parent = <&core_pic>;
+               };
+
+               cache-controller@1840000 {
+                       compatible = "ti,c64x+cache";
+                       reg = <0x01840000 0x8400>;
+               };
+
+               timer0: timer@25e0000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x01 >;
+                       reg = <0x25e0000 0x40>;
+               };
+
+               timer1: timer@25f0000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x02 >;
+                       reg = <0x25f0000 0x40>;
+               };
+
+               timer2: timer@2600000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x04 >;
+                       reg = <0x2600000 0x40>;
+               };
+
+               timer3: timer@2610000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x08 >;
+                       reg = <0x2610000 0x40>;
+               };
+
+               timer4: timer@2620000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x10 >;
+                       reg = <0x2620000 0x40>;
+               };
+
+               timer5: timer@2630000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x20 >;
+                       reg = <0x2630000 0x40>;
+               };
+
+               clock-controller@29a0000 {
+                       compatible = "ti,c6472-pll", "ti,c64x+pll";
+                       reg = <0x029a0000 0x200>;
+                       ti,c64x+pll-bypass-delay = <200>;
+                       ti,c64x+pll-reset-delay = <12000>;
+                       ti,c64x+pll-lock-delay = <80000>;
+               };
+
+               device-state-controller@2a80000 {
+                       compatible = "ti,c64x+dscr";
+                       reg = <0x02a80000 0x1000>;
+
+                       ti,dscr-devstat = <0>;
+                       ti,dscr-silicon-rev = <0x70c 16 0xff>;
+
+                       ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
+                                                0x704 5 6 0 0>;
+
+                       ti,dscr-rmii-resets = <0x208 1
+                                              0x20c 1>;
+
+                       ti,dscr-locked-regs = <0x200 0x204 0x0a1e183a
+                                              0x40c 0x420 0xbea7
+                                              0x41c 0x420 0xbea7>;
+
+                       ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
+
+                       ti,dscr-devstate-ctl-regs = <0 13 0x200 1 0 0 1>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/dts/tms320c6474.dtsi b/arch/c6x/boot/dts/tms320c6474.dtsi
new file mode 100644 (file)
index 0000000..cc601bf
--- /dev/null
@@ -0,0 +1,89 @@
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       reg = <0>;
+                       model = "ti,c64x+";
+               };
+               cpu@1 {
+                       device_type = "cpu";
+                       reg = <1>;
+                       model = "ti,c64x+";
+               };
+               cpu@2 {
+                       device_type = "cpu";
+                       reg = <2>;
+                       model = "ti,c64x+";
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               model = "tms320c6474";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               core_pic: interrupt-controller {
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       compatible = "ti,c64x+core-pic";
+               };
+
+               megamod_pic: interrupt-controller@1800000 {
+                      compatible = "ti,c64x+megamod-pic";
+                      interrupt-controller;
+                      #interrupt-cells = <1>;
+                      reg = <0x1800000 0x1000>;
+                      interrupt-parent = <&core_pic>;
+               };
+
+               cache-controller@1840000 {
+                       compatible = "ti,c64x+cache";
+                       reg = <0x01840000 0x8400>;
+               };
+
+               timer3: timer@2940000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x04 >;
+                       reg = <0x2940000 0x40>;
+               };
+
+               timer4: timer@2950000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x02 >;
+                       reg = <0x2950000 0x40>;
+               };
+
+               timer5: timer@2960000 {
+                       compatible = "ti,c64x+timer64";
+                       ti,core-mask = < 0x01 >;
+                       reg = <0x2960000 0x40>;
+               };
+
+               device-state-controller@2880800 {
+                       compatible = "ti,c64x+dscr";
+                       reg = <0x02880800 0x400>;
+
+                       ti,dscr-devstat = <0x004>;
+                       ti,dscr-silicon-rev = <0x014 28 0xf>;
+                       ti,dscr-mac-fuse-regs = <0x34 3 4 5 6
+                                                0x38 0 0 1 2>;
+               };
+
+               clock-controller@29a0000 {
+                       compatible = "ti,c6474-pll", "ti,c64x+pll";
+                       reg = <0x029a0000 0x200>;
+                       ti,c64x+pll-bypass-delay = <120>;
+                       ti,c64x+pll-reset-delay = <30000>;
+                       ti,c64x+pll-lock-delay = <60000>;
+               };
+       };
+};
diff --git a/arch/c6x/boot/linked_dtb.S b/arch/c6x/boot/linked_dtb.S
new file mode 100644 (file)
index 0000000..57a4454
--- /dev/null
@@ -0,0 +1,2 @@
+.section __fdt_blob,"a"
+.incbin "arch/c6x/boot/builtin.dtb"
diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig
new file mode 100644 (file)
index 0000000..4663487
--- /dev/null
@@ -0,0 +1,44 @@
+CONFIG_SOC_TMS320C6455=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig
new file mode 100644 (file)
index 0000000..bba40e1
--- /dev/null
@@ -0,0 +1,41 @@
+CONFIG_SOC_TMS320C6457=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+CONFIG_BOARD_EVM6457=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig
new file mode 100644 (file)
index 0000000..8c46155
--- /dev/null
@@ -0,0 +1,42 @@
+CONFIG_SOC_TMS320C6472=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_BOARD_EVM6472=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig
new file mode 100644 (file)
index 0000000..15533f6
--- /dev/null
@@ -0,0 +1,42 @@
+CONFIG_SOC_TMS320C6474=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_BOARD_EVM6474=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
new file mode 100644 (file)
index 0000000..13dcf78
--- /dev/null
@@ -0,0 +1,54 @@
+include include/asm-generic/Kbuild.asm
+
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += futex.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mmu_context.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += pgalloc.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += tlbflush.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
diff --git a/arch/c6x/include/asm/asm-offsets.h b/arch/c6x/include/asm/asm-offsets.h
new file mode 100644 (file)
index 0000000..d370ee3
--- /dev/null
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h
new file mode 100644 (file)
index 0000000..39ab7e8
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_BITOPS_H
+#define _ASM_C6X_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit()  barrier()
+
+/*
+ * We are lucky, DSP is perfect for bitops: do it in 3 cycles
+ */
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ * Note __ffs(0) = undef, __ffs(1) = 0, __ffs(0x80000000) = 31.
+ *
+ */
+static inline unsigned long __ffs(unsigned long x)
+{
+       asm (" bitr  .M1  %0,%0\n"
+            " nop\n"
+            " lmbd  .L1  1,%0,%0\n"
+            : "+a"(x));
+
+       return x;
+}
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static inline int fls(int x)
+{
+       if (!x)
+               return 0;
+
+       asm (" lmbd  .L1  1,%0,%0\n" : "+a"(x));
+
+       return 32 - x;
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ * Note ffs(0) = 0, ffs(1) = 1, ffs(0x80000000) = 32.
+ */
+static inline int ffs(int x)
+{
+       if (!x)
+               return 0;
+
+       return __ffs(x) + 1;
+}
+
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_C6X_BITOPS_H */
diff --git a/arch/c6x/include/asm/byteorder.h b/arch/c6x/include/asm/byteorder.h
new file mode 100644 (file)
index 0000000..166038d
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_C6X_BYTEORDER_H
+#define _ASM_C6X_BYTEORDER_H
+
+#include <asm/types.h>
+
+#ifdef _BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else /* _BIG_ENDIAN */
+#include <linux/byteorder/little_endian.h>
+#endif /* _BIG_ENDIAN */
+
+#endif /* _ASM_C6X_BYTEORDER_H */
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
new file mode 100644 (file)
index 0000000..6d521d9
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHE_H
+#define _ASM_C6X_CACHE_H
+
+#include <linux/irqflags.h>
+
+/*
+ * Cache line size
+ */
+#define L1D_CACHE_BYTES   64
+#define L1P_CACHE_BYTES   32
+#define L2_CACHE_BYTES   128
+
+/*
+ * L2 used as cache
+ */
+#define L2MODE_SIZE      L2MODE_256K_CACHE
+
+/*
+ * For practical reasons the L1_CACHE_BYTES defines should not be smaller than
+ * the L2 line size
+ */
+#define L1_CACHE_BYTES        L2_CACHE_BYTES
+
+#define L2_CACHE_ALIGN_LOW(x) \
+       (((x) & ~(L2_CACHE_BYTES - 1)))
+#define L2_CACHE_ALIGN_UP(x) \
+       (((x) + (L2_CACHE_BYTES - 1)) & ~(L2_CACHE_BYTES - 1))
+#define L2_CACHE_ALIGN_CNT(x) \
+       (((x) + (sizeof(int) - 1)) & ~(sizeof(int) - 1))
+
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN     L1_CACHE_BYTES
+
+/*
+ * This is the granularity of hardware cacheability control.
+ */
+#define CACHEABILITY_ALIGN     0x01000000
+
+/*
+ * Align a physical address to MAR regions
+ */
+#define CACHE_REGION_START(v) \
+       (((u32) (v)) & ~(CACHEABILITY_ALIGN - 1))
+#define CACHE_REGION_END(v) \
+       (((u32) (v) + (CACHEABILITY_ALIGN - 1)) & ~(CACHEABILITY_ALIGN - 1))
+
+extern void __init c6x_cache_init(void);
+
+extern void enable_caching(unsigned long start, unsigned long end);
+extern void disable_caching(unsigned long start, unsigned long end);
+
+extern void L1_cache_off(void);
+extern void L1_cache_on(void);
+
+extern void L1P_cache_global_invalidate(void);
+extern void L1D_cache_global_invalidate(void);
+extern void L1D_cache_global_writeback(void);
+extern void L1D_cache_global_writeback_invalidate(void);
+extern void L2_cache_set_mode(unsigned int mode);
+extern void L2_cache_global_writeback_invalidate(void);
+extern void L2_cache_global_writeback(void);
+
+extern void L1P_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_writeback_invalidate(unsigned int start,
+                                                unsigned int end);
+extern void L1D_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback_invalidate(unsigned int start,
+                                               unsigned int end);
+extern void L2_cache_block_invalidate_nowait(unsigned int start,
+                                            unsigned int end);
+extern void L2_cache_block_writeback_nowait(unsigned int start,
+                                           unsigned int end);
+
+extern void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+                                                      unsigned int end);
+
+#endif /* _ASM_C6X_CACHE_H */
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
new file mode 100644 (file)
index 0000000..df5db90
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHEFLUSH_H
+#define _ASM_C6X_CACHEFLUSH_H
+
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/string.h>
+
+/*
+ * virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                      do {} while (0)
+#define flush_cache_mm(mm)                     do {} while (0)
+#define flush_cache_dup_mm(mm)                 do {} while (0)
+#define flush_cache_range(mm, start, end)      do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
+#define flush_cache_vmap(start, end)           do {} while (0)
+#define flush_cache_vunmap(start, end)         do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                        do {} while (0)
+#define flush_dcache_mmap_lock(mapping)                do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
+
+/*
+ * physically-indexed cache management
+ */
+#define flush_icache_range(s, e)                                 \
+do {                                                             \
+               L1D_cache_block_writeback((s), (e));              \
+               L1P_cache_block_invalidate((s), (e));             \
+} while (0)
+
+#define flush_icache_page(vma, page)                                     \
+do {                                                             \
+       if ((vma)->vm_flags & PROT_EXEC)                                  \
+               L1D_cache_block_writeback_invalidate(page_address(page),  \
+                       (unsigned long) page_address(page) + PAGE_SIZE)); \
+               L1P_cache_block_invalidate(page_address(page),            \
+                       (unsigned long) page_address(page) + PAGE_SIZE)); \
+} while (0)
+
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do {                                                \
+       memcpy(dst, src, len);                       \
+       flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
+
+#endif /* _ASM_C6X_CACHEFLUSH_H */
diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h
new file mode 100644 (file)
index 0000000..7246816
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CHECKSUM_H
+#define _ASM_C6X_CHECKSUM_H
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+                  unsigned short proto, __wsum sum)
+{
+       unsigned long long tmp;
+
+       asm ("add     .d1   %1,%5,%1\n"
+            "|| addu .l1   %3,%4,%0\n"
+            "addu    .l1   %2,%0,%0\n"
+#ifndef CONFIG_CPU_BIG_ENDIAN
+            "|| shl  .s1   %1,8,%1\n"
+#endif
+            "addu    .l1   %1,%0,%0\n"
+            "add     .l1   %P0,%p0,%2\n"
+            : "=&a"(tmp), "+a"(len), "+a"(sum)
+            : "a" (saddr), "a" (daddr), "a" (proto));
+       return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_C6X_CHECKSUM_H */
diff --git a/arch/c6x/include/asm/clkdev.h b/arch/c6x/include/asm/clkdev.h
new file mode 100644 (file)
index 0000000..76a070b
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _ASM_CLKDEV_H
+#define _ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+struct clk;
+
+static inline int __clk_get(struct clk *clk)
+{
+       return 1;
+}
+
+static inline void __clk_put(struct clk *clk)
+{
+}
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+       return kzalloc(size, GFP_KERNEL);
+}
+
+#endif /* _ASM_CLKDEV_H */
diff --git a/arch/c6x/include/asm/clock.h b/arch/c6x/include/asm/clock.h
new file mode 100644 (file)
index 0000000..bcf42b2
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * TI C64X clock definitions
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments.
+ * Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ * Copied heavily from arm/mach-davinci/clock.h, so:
+ *
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_C6X_CLOCK_H
+#define _ASM_C6X_CLOCK_H
+
+#ifndef __ASSEMBLER__
+
+#include <linux/list.h>
+
+/* PLL/Reset register offsets */
+#define PLLCTL         0x100
+#define PLLM           0x110
+#define PLLPRE         0x114
+#define PLLDIV1                0x118
+#define PLLDIV2                0x11c
+#define PLLDIV3                0x120
+#define PLLPOST                0x128
+#define PLLCMD         0x138
+#define PLLSTAT                0x13c
+#define PLLALNCTL      0x140
+#define PLLDCHANGE     0x144
+#define PLLCKEN                0x148
+#define PLLCKSTAT      0x14c
+#define PLLSYSTAT      0x150
+#define PLLDIV4                0x160
+#define PLLDIV5                0x164
+#define PLLDIV6                0x168
+#define PLLDIV7                0x16c
+#define PLLDIV8                0x170
+#define PLLDIV9                0x174
+#define PLLDIV10       0x178
+#define PLLDIV11       0x17c
+#define PLLDIV12       0x180
+#define PLLDIV13       0x184
+#define PLLDIV14       0x188
+#define PLLDIV15       0x18c
+#define PLLDIV16       0x190
+
+/* PLLM register bits */
+#define PLLM_PLLM_MASK 0xff
+#define PLLM_VAL(x)    ((x) - 1)
+
+/* PREDIV register bits */
+#define PLLPREDIV_EN   BIT(15)
+#define PLLPREDIV_VAL(x) ((x) - 1)
+
+/* PLLCTL register bits */
+#define PLLCTL_PLLEN   BIT(0)
+#define PLLCTL_PLLPWRDN        BIT(1)
+#define PLLCTL_PLLRST  BIT(3)
+#define PLLCTL_PLLDIS  BIT(4)
+#define PLLCTL_PLLENSRC        BIT(5)
+#define PLLCTL_CLKMODE BIT(8)
+
+/* PLLCMD register bits */
+#define PLLCMD_GOSTAT  BIT(0)
+
+/* PLLSTAT register bits */
+#define PLLSTAT_GOSTAT BIT(0)
+
+/* PLLDIV register bits */
+#define PLLDIV_EN      BIT(15)
+#define PLLDIV_RATIO_MASK 0x1f
+#define PLLDIV_RATIO(x) ((x) - 1)
+
+struct pll_data;
+
+struct clk {
+       struct list_head        node;
+       struct module           *owner;
+       const char              *name;
+       unsigned long           rate;
+       int                     usecount;
+       u32                     flags;
+       struct clk              *parent;
+       struct list_head        children;       /* list of children */
+       struct list_head        childnode;      /* parent's child list node */
+       struct pll_data         *pll_data;
+       u32                     div;
+       unsigned long (*recalc) (struct clk *);
+       int (*set_rate) (struct clk *clk, unsigned long rate);
+       int (*round_rate) (struct clk *clk, unsigned long rate);
+};
+
+/* Clock flags: SoC-specific flags start at BIT(16) */
+#define ALWAYS_ENABLED         BIT(1)
+#define CLK_PLL                        BIT(2) /* PLL-derived clock */
+#define PRE_PLL                        BIT(3) /* source is before PLL mult/div */
+#define FIXED_DIV_PLL          BIT(4) /* fixed divisor from PLL */
+#define FIXED_RATE_PLL         BIT(5) /* fixed output rate PLL */
+
+#define MAX_PLL_SYSCLKS 16
+
+struct pll_data {
+       void __iomem *base;
+       u32 num;
+       u32 flags;
+       u32 input_rate;
+       u32 bypass_delay; /* in loops */
+       u32 reset_delay;  /* in loops */
+       u32 lock_delay;   /* in loops */
+       struct clk sysclks[MAX_PLL_SYSCLKS + 1];
+};
+
+/* pll_data flag bit */
+#define PLL_HAS_PRE    BIT(0)
+#define PLL_HAS_MUL    BIT(1)
+#define PLL_HAS_POST   BIT(2)
+
+#define CLK(dev, con, ck)      \
+       {                       \
+               .dev_id = dev,  \
+               .con_id = con,  \
+               .clk = ck,      \
+       }                       \
+
+extern void c6x_clks_init(struct clk_lookup *clocks);
+extern int clk_register(struct clk *clk);
+extern void clk_unregister(struct clk *clk);
+extern void c64x_setup_clocks(void);
+
+extern struct pll_data c6x_soc_pll1;
+
+extern struct clk clkin1;
+extern struct clk c6x_core_clk;
+extern struct clk c6x_i2c_clk;
+extern struct clk c6x_watchdog_clk;
+extern struct clk c6x_mcbsp1_clk;
+extern struct clk c6x_mcbsp2_clk;
+extern struct clk c6x_mdio_clk;
+
+#endif
+
+#endif /* _ASM_C6X_CLOCK_H */
diff --git a/arch/c6x/include/asm/delay.h b/arch/c6x/include/asm/delay.h
new file mode 100644 (file)
index 0000000..f314c2e
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_DELAY_H
+#define _ASM_C6X_DELAY_H
+
+#include <linux/kernel.h>
+
+extern unsigned int ticks_per_ns_scaled;
+
+static inline void __delay(unsigned long loops)
+{
+       uint32_t tmp;
+
+       /* 6 cycles per loop */
+       asm volatile ("        mv    .s1  %0,%1\n"
+                     "0: [%1] b     .s1  0b\n"
+                     "        add   .l1  -6,%0,%0\n"
+                     "        cmplt .l1  1,%0,%1\n"
+                     "        nop   3\n"
+                     : "+a"(loops), "=A"(tmp));
+}
+
+static inline void _c6x_tickdelay(unsigned int x)
+{
+       uint32_t cnt, endcnt;
+
+       asm volatile ("        mvc   .s2   TSCL,%0\n"
+                     "        add   .s2x  %0,%1,%2\n"
+                     " ||     mvk   .l2   1,B0\n"
+                     "0: [B0] b     .s2   0b\n"
+                     "        mvc   .s2   TSCL,%0\n"
+                     "        sub   .s2   %0,%2,%0\n"
+                     "        cmpgt .l2   0,%0,B0\n"
+                     "        nop   2\n"
+                     : "=b"(cnt), "+a"(x), "=b"(endcnt) : : "B0");
+}
+
+/* use scaled math to avoid slow division */
+#define C6X_NDELAY_SCALE 10
+
+static inline void _ndelay(unsigned int n)
+{
+       _c6x_tickdelay((ticks_per_ns_scaled * n) >> C6X_NDELAY_SCALE);
+}
+
+static inline void _udelay(unsigned int n)
+{
+       while (n >= 10) {
+               _ndelay(10000);
+               n -= 10;
+       }
+       while (n-- > 0)
+               _ndelay(1000);
+}
+
+#define udelay(x) _udelay((unsigned int)(x))
+#define ndelay(x) _ndelay((unsigned int)(x))
+
+#endif /* _ASM_C6X_DELAY_H */
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..03579fd
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#ifndef _ASM_C6X_DMA_MAPPING_H
+#define _ASM_C6X_DMA_MAPPING_H
+
+#include <linux/dma-debug.h>
+#include <asm-generic/dma-coherent.h>
+
+#define dma_supported(d, m)    1
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+               return -EIO;
+
+       *dev->dma_mask = dma_mask;
+
+       return 0;
+}
+
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr == ~0;
+}
+
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+                                size_t size, enum dma_data_direction dir);
+
+extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
+                            size_t size, enum dma_data_direction dir);
+
+extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+                     int nents, enum dma_data_direction direction);
+
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                        int nents, enum dma_data_direction direction);
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     unsigned long offset, size_t size,
+                                     enum dma_data_direction dir)
+{
+       dma_addr_t handle;
+
+       handle = dma_map_single(dev, page_address(page) + offset, size, dir);
+
+       debug_dma_map_page(dev, page, offset, size, dir, handle, false);
+
+       return handle;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+       dma_unmap_single(dev, handle, size, dir);
+
+       debug_dma_unmap_page(dev, handle, size, dir, false);
+}
+
+extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+                                   size_t size, enum dma_data_direction dir);
+
+extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+                                      size_t size,
+                                      enum dma_data_direction dir);
+
+extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction dir);
+
+extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir);
+
+extern void coherent_mem_init(u32 start, u32 size);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
+#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
+
+#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/c6x/include/asm/dscr.h b/arch/c6x/include/asm/dscr.h
new file mode 100644 (file)
index 0000000..561ba83
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#ifndef _ASM_C6X_DSCR_H
+#define _ASM_C6X_DSCR_H
+
+enum dscr_devstate_t {
+       DSCR_DEVSTATE_ENABLED,
+       DSCR_DEVSTATE_DISABLED,
+};
+
+/*
+ * Set the device state of the device with the given ID.
+ *
+ * Individual drivers should use this to enable or disable the
+ * hardware device. The devid used to identify the device being
+ * controlled should be a property in the device's tree node.
+ */
+extern void dscr_set_devstate(int devid, enum dscr_devstate_t state);
+
+/*
+ * Assert or de-assert an RMII reset.
+ */
+extern void dscr_rmii_reset(int id, int assert);
+
+extern void dscr_probe(void);
+
+#endif /* _ASM_C6X_DSCR_H */
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
new file mode 100644 (file)
index 0000000..d57865b
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_ELF_H
+#define _ASM_C6X_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpreg_t;
+
+#define ELF_NGREG  58
+#define ELF_NFPREG 1
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_TI_C6000)
+
+#define elf_check_const_displacement(x) (1)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA       ELFDATA2LSB
+#else
+#define ELF_DATA       ELFDATA2MSB
+#endif
+
+#define ELF_CLASS      ELFCLASS32
+#define ELF_ARCH       EM_TI_C6000
+
+/* Nothing for now. Need to setup DP... */
+#define ELF_PLAT_INIT(_r)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE      4096
+
+#define ELF_CORE_COPY_REGS(_dest, _regs)               \
+       memcpy((char *) &_dest, (char *) _regs,         \
+       sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this cpu supports.  */
+
+#define ELF_HWCAP      (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.  */
+
+#define ELF_PLATFORM  (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+/* C6X specific section types */
+#define SHT_C6000_UNWIND       0x70000001
+#define SHT_C6000_PREEMPTMAP   0x70000002
+#define SHT_C6000_ATTRIBUTES   0x70000003
+
+/* C6X specific DT_ tags */
+#define DT_C6000_DSBT_BASE     0x70000000
+#define DT_C6000_DSBT_SIZE     0x70000001
+#define DT_C6000_PREEMPTMAP    0x70000002
+#define DT_C6000_DSBT_INDEX    0x70000003
+
+/* C6X specific relocs */
+#define R_C6000_NONE           0
+#define R_C6000_ABS32          1
+#define R_C6000_ABS16          2
+#define R_C6000_ABS8           3
+#define R_C6000_PCR_S21                4
+#define R_C6000_PCR_S12                5
+#define R_C6000_PCR_S10                6
+#define R_C6000_PCR_S7         7
+#define R_C6000_ABS_S16                8
+#define R_C6000_ABS_L16                9
+#define R_C6000_ABS_H16                10
+#define R_C6000_SBR_U15_B      11
+#define R_C6000_SBR_U15_H      12
+#define R_C6000_SBR_U15_W      13
+#define R_C6000_SBR_S16                14
+#define R_C6000_SBR_L16_B      15
+#define R_C6000_SBR_L16_H      16
+#define R_C6000_SBR_L16_W      17
+#define R_C6000_SBR_H16_B      18
+#define R_C6000_SBR_H16_H      19
+#define R_C6000_SBR_H16_W      20
+#define R_C6000_SBR_GOT_U15_W  21
+#define R_C6000_SBR_GOT_L16_W  22
+#define R_C6000_SBR_GOT_H16_W  23
+#define R_C6000_DSBT_INDEX     24
+#define R_C6000_PREL31         25
+#define R_C6000_COPY           26
+#define R_C6000_ALIGN          253
+#define R_C6000_FPHEAD         254
+#define R_C6000_NOCMP          255
+
+#endif /*_ASM_C6X_ELF_H */
diff --git a/arch/c6x/include/asm/ftrace.h b/arch/c6x/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..3701958
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_C6X_FTRACE_H
+#define _ASM_C6X_FTRACE_H
+
+/* empty */
+
+#endif /* _ASM_C6X_FTRACE_H */
diff --git a/arch/c6x/include/asm/hardirq.h b/arch/c6x/include/asm/hardirq.h
new file mode 100644 (file)
index 0000000..9621954
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_C6X_HARDIRQ_H
+#define _ASM_C6X_HARDIRQ_H
+
+extern void ack_bad_irq(int irq);
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
+
+#endif /* _ASM_C6X_HARDIRQ_H */
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
new file mode 100644 (file)
index 0000000..a6ae3c9
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Large parts taken directly from powerpc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_IRQ_H
+#define _ASM_C6X_IRQ_H
+
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <asm/percpu.h>
+
+#define irq_canonicalize(irq)  (irq)
+
+/*
+ * The C64X+ core has 16 IRQ vectors. One each is used by Reset and NMI. Two
+ * are reserved. The remaining 12 vectors are used to route SoC interrupts.
+ * These interrupt vectors are prioritized with IRQ 4 having the highest
+ * priority and IRQ 15 having the lowest.
+ *
+ * The C64x+ megamodule provides a PIC which combines SoC IRQ sources into a
+ * single core IRQ vector. There are four combined sources, each of which
+ * feed into one of the 12 general interrupt vectors. The remaining 8 vectors
+ * can each route a single SoC interrupt directly.
+ */
+#define NR_PRIORITY_IRQS 16
+
+#define NR_IRQS_LEGACY NR_PRIORITY_IRQS
+
+/* Total number of virq in the platform */
+#define NR_IRQS                256
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ         0
+
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+/* Interrupt controller "host" data structure. This could be defined as a
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The host
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a host can cover more than one PIC if they have a flat number
+ * model). It's the host callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are fairly agnostic to the fact that
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart host->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
+ */
+struct irq_host;
+struct radix_tree_root;
+struct device_node;
+
+/* Functions below are provided by the host and called whenever a new mapping
+ * is created or an old mapping is disposed. The host can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
+ */
+struct irq_host_ops {
+       /* Match an interrupt controller device node to a host, returns
+        * 1 on a match
+        */
+       int (*match)(struct irq_host *h, struct device_node *node);
+
+       /* Create or update a mapping between a virtual irq number and a hw
+        * irq number. This is called only once for a given mapping.
+        */
+       int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
+
+       /* Dispose of such a mapping */
+       void (*unmap)(struct irq_host *h, unsigned int virq);
+
+       /* Translate device-tree interrupt specifier from raw format coming
+        * from the firmware to an irq_hw_number_t (interrupt line number) and
+        * type (sense) that can be passed to set_irq_type(). In the absence
+        * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
+        * will return the hw number in the first cell and IRQ_TYPE_NONE for
+        * the type (which amount to keeping whatever default value the
+        * interrupt controller has for that line)
+        */
+       int (*xlate)(struct irq_host *h, struct device_node *ctrler,
+                    const u32 *intspec, unsigned int intsize,
+                    irq_hw_number_t *out_hwirq, unsigned int *out_type);
+};
+
+struct irq_host {
+       struct list_head        link;
+
+       /* type of reverse mapping technique */
+       unsigned int            revmap_type;
+#define IRQ_HOST_MAP_PRIORITY   0 /* core priority irqs, get irqs 1..15 */
+#define IRQ_HOST_MAP_NOMAP     1 /* no fast reverse mapping */
+#define IRQ_HOST_MAP_LINEAR    2 /* linear map of interrupts */
+#define IRQ_HOST_MAP_TREE      3 /* radix tree */
+       union {
+               struct {
+                       unsigned int size;
+                       unsigned int *revmap;
+               } linear;
+               struct radix_tree_root tree;
+       } revmap_data;
+       struct irq_host_ops     *ops;
+       void                    *host_data;
+       irq_hw_number_t         inval_irq;
+
+       /* Optional device node pointer */
+       struct device_node      *of_node;
+};
+
+struct irq_data;
+extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
+extern irq_hw_number_t virq_to_hw(unsigned int virq);
+extern bool virq_is_host(unsigned int virq, struct irq_host *host);
+
+/**
+ * irq_alloc_host - Allocate a new irq_host data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
+ * @ops: map/unmap host callbacks
+ * @inval_irq: provide a hw number in that host space that is always invalid
+ *
+ * Allocates and initializes an irq_host structure. Note that in the case of
+ * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
+ * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
+ * later during boot automatically (the reverse mapping will use the slow path
+ * until that happens).
+ */
+extern struct irq_host *irq_alloc_host(struct device_node *of_node,
+                                      unsigned int revmap_type,
+                                      unsigned int revmap_arg,
+                                      struct irq_host_ops *ops,
+                                      irq_hw_number_t inval_irq);
+
+
+/**
+ * irq_find_host - Locates a host for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+extern struct irq_host *irq_find_host(struct device_node *node);
+
+
+/**
+ * irq_set_default_host - Set a "default" host
+ * @host: default host pointer
+ *
+ * For convenience, it's possible to set a "default" host that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+extern void irq_set_default_host(struct irq_host *host);
+
+
+/**
+ * irq_set_virq_count - Set the maximum number of virt irqs
+ * @count: number of linux virtual irqs, capped with NR_IRQS
+ *
+ * This is mainly for use by platforms like iSeries who want to program
+ * the virtual irq number in the controller to avoid the reverse mapping
+ */
+extern void irq_set_virq_count(unsigned int count);
+
+
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+                                      irq_hw_number_t hwirq);
+
+
+/**
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
+ */
+extern void irq_dispose_mapping(unsigned int virq);
+
+/**
+ * irq_find_mapping - Find a linux virq from an hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+extern unsigned int irq_find_mapping(struct irq_host *host,
+                                    irq_hw_number_t hwirq);
+
+/**
+ * irq_create_direct_mapping - Allocate a virq for direct mapping
+ * @host: host to allocate the virq for or NULL for default host
+ *
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux virq as the hardware interrupt number.
+ */
+extern unsigned int irq_create_direct_mapping(struct irq_host *host);
+
+/**
+ * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
+ * @host: host owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+                                   irq_hw_number_t hwirq);
+
+/**
+ * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+                                           irq_hw_number_t hwirq);
+
+/**
+ * irq_linear_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It does fallback to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking
+ */
+
+extern unsigned int irq_linear_revmap(struct irq_host *host,
+                                     irq_hw_number_t hwirq);
+
+
+
+/**
+ * irq_alloc_virt - Allocate virtual irq numbers
+ * @host: host owning these new virtual irqs
+ * @count: number of consecutive numbers to allocate
+ * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
+ *
+ * This is a low level function that is used internally by irq_create_mapping()
+ * and that can be used by some irq controllers implementations for things
+ * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
+ */
+extern unsigned int irq_alloc_virt(struct irq_host *host,
+                                  unsigned int count,
+                                  unsigned int hint);
+
+/**
+ * irq_free_virt - Free virtual irq numbers
+ * @virq: virtual irq number of the first interrupt to free
+ * @count: number of interrupts to free
+ *
+ * This function is the opposite of irq_alloc_virt. It will not clear reverse
+ * maps, this should be done previously by unmap'ing the interrupt. In fact,
+ * all interrupts covered by the range being freed should have been unmapped
+ * prior to calling this.
+ */
+extern void irq_free_virt(unsigned int virq, unsigned int count);
+
+extern void __init init_pic_c64xplus(void);
+
+extern void init_IRQ(void);
+
+struct pt_regs;
+
+extern asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs);
+
+extern unsigned long irq_err_count;
+
+#endif /* _ASM_C6X_IRQ_H */
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
new file mode 100644 (file)
index 0000000..cf78e09
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  C6X IRQ flag handling
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
+
+       asm volatile (" mvc .s2 CSR,%0\n" : "=b"(flags));
+       return flags;
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+       unsigned long flags = arch_local_save_flags();
+       flags |= 1;
+       arch_local_irq_restore(flags);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+       unsigned long flags = arch_local_save_flags();
+       flags &= ~1;
+       arch_local_irq_restore(flags);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       flags = arch_local_save_flags();
+       arch_local_irq_restore(flags & ~1);
+       return flags;
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return (flags & 1) == 0;
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+       return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/c6x/include/asm/linkage.h b/arch/c6x/include/asm/linkage.h
new file mode 100644 (file)
index 0000000..376925c
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _ASM_C6X_LINKAGE_H
+#define _ASM_C6X_LINKAGE_H
+
+#ifdef __ASSEMBLER__
+
+#define __ALIGN                .align 2
+#define __ALIGN_STR    ".align 2"
+
+#ifndef __DSBT__
+#define ENTRY(name)            \
+       .global name @          \
+       __ALIGN @               \
+name:
+#else
+#define ENTRY(name)            \
+       .global name @          \
+       .hidden name @          \
+       __ALIGN @               \
+name:
+#endif
+
+#define ENDPROC(name)          \
+       .type name, @function @ \
+       .size name, . - name
+
+#endif
+
+#include <asm-generic/linkage.h>
+
+#endif /* _ASM_C6X_LINKAGE_H */
diff --git a/arch/c6x/include/asm/megamod-pic.h b/arch/c6x/include/asm/megamod-pic.h
new file mode 100644 (file)
index 0000000..eca0a86
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _C6X_MEGAMOD_PIC_H
+#define _C6X_MEGAMOD_PIC_H
+
+#ifdef __KERNEL__
+
+extern void __init megamod_pic_init(void);
+
+#endif /* __KERNEL__ */
+#endif /* _C6X_MEGAMOD_PIC_H */
diff --git a/arch/c6x/include/asm/mmu.h b/arch/c6x/include/asm/mmu.h
new file mode 100644 (file)
index 0000000..41592bf
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_MMU_H
+#define _ASM_C6X_MMU_H
+
+typedef struct {
+       unsigned long           end_brk;
+} mm_context_t;
+
+#endif /* _ASM_C6X_MMU_H */
diff --git a/arch/c6x/include/asm/module.h b/arch/c6x/include/asm/module.h
new file mode 100644 (file)
index 0000000..a453f97
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34 by: Mark Salter (msalter@redhat.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_MODULE_H
+#define _ASM_C6X_MODULE_H
+
+#define Elf_Shdr       Elf32_Shdr
+#define Elf_Sym                Elf32_Sym
+#define Elf_Ehdr       Elf32_Ehdr
+#define Elf_Addr       Elf32_Addr
+#define Elf_Word       Elf32_Word
+
+/*
+ * This file contains the C6x architecture specific module code.
+ */
+struct mod_arch_specific {
+};
+
+struct loaded_sections {
+       unsigned int new_vaddr;
+       unsigned int loaded;
+};
+
+#endif /* _ASM_C6X_MODULE_H */
diff --git a/arch/c6x/include/asm/mutex.h b/arch/c6x/include/asm/mutex.h
new file mode 100644 (file)
index 0000000..7a7248e
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_C6X_MUTEX_H
+#define _ASM_C6X_MUTEX_H
+
+#include <asm-generic/mutex-null.h>
+
+#endif /* _ASM_C6X_MUTEX_H */
diff --git a/arch/c6x/include/asm/page.h b/arch/c6x/include/asm/page.h
new file mode 100644 (file)
index 0000000..d18e2b0
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef _ASM_C6X_PAGE_H
+#define _ASM_C6X_PAGE_H
+
+#define VM_DATA_DEFAULT_FLAGS \
+       (VM_READ | VM_WRITE | \
+       ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/page.h>
+
+#endif /* _ASM_C6X_PAGE_H */
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
new file mode 100644 (file)
index 0000000..68c8af4
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PGTABLE_H
+#define _ASM_C6X_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define        VMALLOC_START   0
+#define        VMALLOC_END     0xffffffff
+
+#define pgd_present(pgd)       (1)
+#define pgd_none(pgd)          (0)
+#define pgd_bad(pgd)           (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+
+#define pmd_offset(a, b)       ((void *)0)
+#define pmd_none(x)            (!pmd_val(x))
+#define pmd_present(x)         (pmd_val(x))
+#define pmd_clear(xp)          do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x)             (pmd_val(x) & ~PAGE_MASK)
+
+#define PAGE_NONE              __pgprot(0)    /* these mean nothing to NO_MM */
+#define PAGE_SHARED            __pgprot(0)    /* these mean nothing to NO_MM */
+#define PAGE_COPY              __pgprot(0)    /* these mean nothing to NO_MM */
+#define PAGE_READONLY          __pgprot(0)    /* these mean nothing to NO_MM */
+#define PAGE_KERNEL            __pgprot(0)    /* these mean nothing to NO_MM */
+#define pgprot_noncached(prot) (prot)
+
+extern void paging_init(void);
+
+#define __swp_type(x)          (0)
+#define __swp_offset(x)                (0)
+#define __swp_entry(typ, off)  ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
+
+static inline int pte_file(pte_t pte)
+{
+       return 0;
+}
+
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr)       virt_to_page(empty_zero_page)
+extern unsigned long empty_zero_page;
+
+#define swapper_pg_dir ((pgd_t *) 0)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+#define io_remap_pfn_range      remap_pfn_range
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot)             \
+               remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
new file mode 100644 (file)
index 0000000..8154c4e
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PROCESSOR_H
+#define _ASM_C6X_PROCESSOR_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/current.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr()                    \
+({                                             \
+       void *__pc;                             \
+       asm("mvc .S2 pce1,%0\n" : "=b"(__pc));  \
+       __pc;                                   \
+})
+
+/*
+ * User space process size. This is mostly meaningless for NOMMU
+ * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
+ * Since calls like mmap() can return an address or an error, we
+ * have to allow room for error returns when code does something
+ * like:
+ *
+ *       addr = do_mmap(...)
+ *       if ((unsigned long)addr >= TASK_SIZE)
+ *            ... its an error code, not an address ...
+ *
+ * Here, we allow for 4096 error codes which means we really can't
+ * use the last 4K page on systems with RAM extending all the way
+ * to the end of the 32-bit address space.
+ */
+#define TASK_SIZE      0xFFFFF000
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's. We won't be using it
+ */
+#define TASK_UNMAPPED_BASE     0
+
+struct thread_struct {
+       unsigned long long b15_14;
+       unsigned long long a15_14;
+       unsigned long long b13_12;
+       unsigned long long a13_12;
+       unsigned long long b11_10;
+       unsigned long long a11_10;
+       unsigned long long ricl_icl;
+       unsigned long  usp;             /* user stack pointer */
+       unsigned long  pc;              /* kernel pc */
+       unsigned long  wchan;
+};
+
+#define INIT_THREAD                                    \
+{                                                      \
+       .usp = 0,                                       \
+       .wchan = 0,                                     \
+}
+
+#define INIT_MMAP { \
+       &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
+       NULL, NULL }
+
+#define task_pt_regs(task) \
+       ((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
+
+#define alloc_kernel_stack()   __get_free_page(GFP_KERNEL)
+#define free_kernel_stack(page) free_page((page))
+
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+extern void start_thread(struct pt_regs *regs, unsigned int pc,
+                        unsigned long usp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)   do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+#define copy_segments(tsk, mm)         do { } while (0)
+#define release_segments(mm)           do { } while (0)
+
+/*
+ * saved PC of a blocked thread.
+ */
+#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
+
+/*
+ * saved kernel SP and DP of a blocked thread.
+ */
+#ifdef _BIG_ENDIAN
+#define thread_saved_ksp(tsk) \
+       (*(unsigned long *)&(tsk)->thread.b15_14)
+#define thread_saved_dp(tsk) \
+       (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
+#else
+#define thread_saved_ksp(tsk) \
+       (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
+#define thread_saved_dp(tsk) \
+       (*(unsigned long *)&(tsk)->thread.b15_14)
+#endif
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
+#define        KSTK_ESP(tsk)   (task_pt_regs(task)->sp)
+
+#define cpu_relax()            do { } while (0)
+
+extern const struct seq_operations cpuinfo_op;
+
+#endif /* _ASM_C6X_PROCESSOR_H */
diff --git a/arch/c6x/include/asm/procinfo.h b/arch/c6x/include/asm/procinfo.h
new file mode 100644 (file)
index 0000000..c139d1e
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (C) 2010 Texas Instruments Incorporated
+ *  Author: Mark Salter (msalter@redhat.com)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PROCINFO_H
+#define _ASM_C6X_PROCINFO_H
+
+#ifdef __KERNEL__
+
+struct proc_info_list {
+       unsigned int            cpu_val;
+       unsigned int            cpu_mask;
+       const char              *arch_name;
+       const char              *elf_name;
+       unsigned int            elf_hwcap;
+};
+
+#else  /* __KERNEL__ */
+#include <asm/elf.h>
+#warning "Please include asm/elf.h instead"
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_C6X_PROCINFO_H */
diff --git a/arch/c6x/include/asm/prom.h b/arch/c6x/include/asm/prom.h
new file mode 100644 (file)
index 0000000..b4ec95f
--- /dev/null
@@ -0,0 +1 @@
+/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/c6x/include/asm/ptrace.h b/arch/c6x/include/asm/ptrace.h
new file mode 100644 (file)
index 0000000..21e8d79
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ *  Copyright (C) 2004, 2006, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PTRACE_H
+#define _ASM_C6X_PTRACE_H
+
+#define BKPT_OPCODE    0x56454314      /* illegal opcode */
+
+#ifdef _BIG_ENDIAN
+#define PT_LO(odd, even)  odd
+#define PT_HI(odd, even)  even
+#else
+#define PT_LO(odd, even)  even
+#define PT_HI(odd, even)  odd
+#endif
+
+#define PT_A4_ORG  PT_LO(1, 0)
+#define PT_TSR    PT_HI(1, 0)
+#define PT_ILC    PT_LO(3, 2)
+#define PT_RILC    PT_HI(3, 2)
+#define PT_CSR    PT_LO(5, 4)
+#define PT_PC     PT_HI(5, 4)
+#define PT_B16    PT_LO(7, 6)
+#define PT_B17    PT_HI(7, 6)
+#define PT_B18    PT_LO(9, 8)
+#define PT_B19    PT_HI(9, 8)
+#define PT_B20    PT_LO(11, 10)
+#define PT_B21    PT_HI(11, 10)
+#define PT_B22    PT_LO(13, 12)
+#define PT_B23    PT_HI(13, 12)
+#define PT_B24    PT_LO(15, 14)
+#define PT_B25    PT_HI(15, 14)
+#define PT_B26    PT_LO(17, 16)
+#define PT_B27    PT_HI(17, 16)
+#define PT_B28    PT_LO(19, 18)
+#define PT_B29    PT_HI(19, 18)
+#define PT_B30    PT_LO(21, 20)
+#define PT_B31    PT_HI(21, 20)
+#define PT_B0     PT_LO(23, 22)
+#define PT_B1     PT_HI(23, 22)
+#define PT_B2     PT_LO(25, 24)
+#define PT_B3     PT_HI(25, 24)
+#define PT_B4     PT_LO(27, 26)
+#define PT_B5     PT_HI(27, 26)
+#define PT_B6     PT_LO(29, 28)
+#define PT_B7     PT_HI(29, 28)
+#define PT_B8     PT_LO(31, 30)
+#define PT_B9     PT_HI(31, 30)
+#define PT_B10    PT_LO(33, 32)
+#define PT_B11    PT_HI(33, 32)
+#define PT_B12    PT_LO(35, 34)
+#define PT_B13    PT_HI(35, 34)
+#define PT_A16    PT_LO(37, 36)
+#define PT_A17    PT_HI(37, 36)
+#define PT_A18    PT_LO(39, 38)
+#define PT_A19    PT_HI(39, 38)
+#define PT_A20    PT_LO(41, 40)
+#define PT_A21    PT_HI(41, 40)
+#define PT_A22    PT_LO(43, 42)
+#define PT_A23    PT_HI(43, 42)
+#define PT_A24    PT_LO(45, 44)
+#define PT_A25    PT_HI(45, 44)
+#define PT_A26    PT_LO(47, 46)
+#define PT_A27    PT_HI(47, 46)
+#define PT_A28    PT_LO(49, 48)
+#define PT_A29    PT_HI(49, 48)
+#define PT_A30    PT_LO(51, 50)
+#define PT_A31    PT_HI(51, 50)
+#define PT_A0     PT_LO(53, 52)
+#define PT_A1     PT_HI(53, 52)
+#define PT_A2     PT_LO(55, 54)
+#define PT_A3     PT_HI(55, 54)
+#define PT_A4     PT_LO(57, 56)
+#define PT_A5     PT_HI(57, 56)
+#define PT_A6     PT_LO(59, 58)
+#define PT_A7     PT_HI(59, 58)
+#define PT_A8     PT_LO(61, 60)
+#define PT_A9     PT_HI(61, 60)
+#define PT_A10    PT_LO(63, 62)
+#define PT_A11    PT_HI(63, 62)
+#define PT_A12    PT_LO(65, 64)
+#define PT_A13    PT_HI(65, 64)
+#define PT_A14    PT_LO(67, 66)
+#define PT_A15    PT_HI(67, 66)
+#define PT_B14    PT_LO(69, 68)
+#define PT_B15    PT_HI(69, 68)
+
+#define NR_PTREGS  70
+
+#define PT_DP     PT_B14  /* Data Segment Pointer (B14) */
+#define PT_SP     PT_B15  /* Stack Pointer (B15)  */
+
+#ifndef __ASSEMBLY__
+
+#ifdef _BIG_ENDIAN
+#define REG_PAIR(odd, even) unsigned long odd; unsigned long even
+#else
+#define REG_PAIR(odd, even) unsigned long even; unsigned long odd
+#endif
+
+/*
+ * this struct defines the way the registers are stored on the
+ * stack during a system call. fields defined with REG_PAIR
+ * are saved and restored using double-word memory operations
+ * which means the word ordering of the pair depends on endianess.
+ */
+struct pt_regs {
+       REG_PAIR(tsr, orig_a4);
+       REG_PAIR(rilc, ilc);
+       REG_PAIR(pc, csr);
+
+       REG_PAIR(b17, b16);
+       REG_PAIR(b19, b18);
+       REG_PAIR(b21, b20);
+       REG_PAIR(b23, b22);
+       REG_PAIR(b25, b24);
+       REG_PAIR(b27, b26);
+       REG_PAIR(b29, b28);
+       REG_PAIR(b31, b30);
+
+       REG_PAIR(b1, b0);
+       REG_PAIR(b3, b2);
+       REG_PAIR(b5, b4);
+       REG_PAIR(b7, b6);
+       REG_PAIR(b9, b8);
+       REG_PAIR(b11, b10);
+       REG_PAIR(b13, b12);
+
+       REG_PAIR(a17, a16);
+       REG_PAIR(a19, a18);
+       REG_PAIR(a21, a20);
+       REG_PAIR(a23, a22);
+       REG_PAIR(a25, a24);
+       REG_PAIR(a27, a26);
+       REG_PAIR(a29, a28);
+       REG_PAIR(a31, a30);
+
+       REG_PAIR(a1, a0);
+       REG_PAIR(a3, a2);
+       REG_PAIR(a5, a4);
+       REG_PAIR(a7, a6);
+       REG_PAIR(a9, a8);
+       REG_PAIR(a11, a10);
+       REG_PAIR(a13, a12);
+
+       REG_PAIR(a15, a14);
+       REG_PAIR(sp, dp);
+};
+
+#ifdef __KERNEL__
+
+#include <linux/linkage.h>
+
+#define user_mode(regs)        ((((regs)->tsr) & 0x40) != 0)
+
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
+
+extern void show_regs(struct pt_regs *);
+
+extern asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs);
+extern asmlinkage void syscall_trace_exit(struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_C6X_PTRACE_H */
diff --git a/arch/c6x/include/asm/sections.h b/arch/c6x/include/asm/sections.h
new file mode 100644 (file)
index 0000000..f703989
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_C6X_SECTIONS_H
+#define _ASM_C6X_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _vectors_start[];
+extern char _vectors_end[];
+
+extern char _data_lma[];
+extern char _fdt_start[], _fdt_end[];
+
+#endif /* _ASM_C6X_SECTIONS_H */
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
new file mode 100644 (file)
index 0000000..1808f27
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SETUP_H
+#define _ASM_C6X_SETUP_H
+
+#define COMMAND_LINE_SIZE   1024
+
+#ifndef __ASSEMBLY__
+extern char c6x_command_line[COMMAND_LINE_SIZE];
+
+extern int c6x_add_memory(phys_addr_t start, unsigned long size);
+
+extern unsigned long ram_start;
+extern unsigned long ram_end;
+
+extern int c6x_num_cores;
+extern unsigned int c6x_silicon_rev;
+extern unsigned int c6x_devstat;
+extern unsigned char c6x_fuse_mac[6];
+
+extern void machine_init(unsigned long dt_ptr);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_C6X_SETUP_H */
diff --git a/arch/c6x/include/asm/sigcontext.h b/arch/c6x/include/asm/sigcontext.h
new file mode 100644 (file)
index 0000000..eb702f3
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SIGCONTEXT_H
+#define _ASM_C6X_SIGCONTEXT_H
+
+
+struct sigcontext {
+       unsigned long  sc_mask;         /* old sigmask */
+       unsigned long  sc_sp;           /* old user stack pointer */
+
+       unsigned long  sc_a4;
+       unsigned long  sc_b4;
+       unsigned long  sc_a6;
+       unsigned long  sc_b6;
+       unsigned long  sc_a8;
+       unsigned long  sc_b8;
+
+       unsigned long  sc_a0;
+       unsigned long  sc_a1;
+       unsigned long  sc_a2;
+       unsigned long  sc_a3;
+       unsigned long  sc_a5;
+       unsigned long  sc_a7;
+       unsigned long  sc_a9;
+
+       unsigned long  sc_b0;
+       unsigned long  sc_b1;
+       unsigned long  sc_b2;
+       unsigned long  sc_b3;
+       unsigned long  sc_b5;
+       unsigned long  sc_b7;
+       unsigned long  sc_b9;
+
+       unsigned long  sc_a16;
+       unsigned long  sc_a17;
+       unsigned long  sc_a18;
+       unsigned long  sc_a19;
+       unsigned long  sc_a20;
+       unsigned long  sc_a21;
+       unsigned long  sc_a22;
+       unsigned long  sc_a23;
+       unsigned long  sc_a24;
+       unsigned long  sc_a25;
+       unsigned long  sc_a26;
+       unsigned long  sc_a27;
+       unsigned long  sc_a28;
+       unsigned long  sc_a29;
+       unsigned long  sc_a30;
+       unsigned long  sc_a31;
+
+       unsigned long  sc_b16;
+       unsigned long  sc_b17;
+       unsigned long  sc_b18;
+       unsigned long  sc_b19;
+       unsigned long  sc_b20;
+       unsigned long  sc_b21;
+       unsigned long  sc_b22;
+       unsigned long  sc_b23;
+       unsigned long  sc_b24;
+       unsigned long  sc_b25;
+       unsigned long  sc_b26;
+       unsigned long  sc_b27;
+       unsigned long  sc_b28;
+       unsigned long  sc_b29;
+       unsigned long  sc_b30;
+       unsigned long  sc_b31;
+
+       unsigned long  sc_csr;
+       unsigned long  sc_pc;
+};
+
+#endif /* _ASM_C6X_SIGCONTEXT_H */
diff --git a/arch/c6x/include/asm/signal.h b/arch/c6x/include/asm/signal.h
new file mode 100644 (file)
index 0000000..f1cd870
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef _ASM_C6X_SIGNAL_H
+#define _ASM_C6X_SIGNAL_H
+
+#include <asm-generic/signal.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+
+struct pt_regs;
+
+extern asmlinkage int do_rt_sigreturn(struct pt_regs *regs);
+extern asmlinkage void do_notify_resume(struct pt_regs *regs,
+                                       u32 thread_info_flags,
+                                       int syscall);
+#endif
+
+#endif /* _ASM_C6X_SIGNAL_H */
diff --git a/arch/c6x/include/asm/soc.h b/arch/c6x/include/asm/soc.h
new file mode 100644 (file)
index 0000000..43f5015
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Miscellaneous SoC-specific hooks.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _ASM_C6X_SOC_H
+#define _ASM_C6X_SOC_H
+
+struct soc_ops {
+       /* Return active exception event or -1 if none */
+       int             (*get_exception)(void);
+
+       /* Assert an event */
+       void            (*assert_event)(unsigned int evt);
+};
+
+extern struct soc_ops soc_ops;
+
+extern int soc_get_exception(void);
+extern void soc_assert_event(unsigned int event);
+extern int soc_mac_addr(unsigned int index, u8 *addr);
+
+/*
+ * for mmio on SoC devices. regs are always same byte order as cpu.
+ */
+#define soc_readl(addr)    __raw_readl(addr)
+#define soc_writel(b, addr) __raw_writel((b), (addr))
+
+#endif /* _ASM_C6X_SOC_H */
diff --git a/arch/c6x/include/asm/string.h b/arch/c6x/include/asm/string.h
new file mode 100644 (file)
index 0000000..b21517c
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_STRING_H
+#define _ASM_C6X_STRING_H
+
+#include <asm/page.h>
+#include <linux/linkage.h>
+
+asmlinkage extern void *memcpy(void *to, const void *from, size_t n);
+
+#define __HAVE_ARCH_MEMCPY
+
+#endif /* _ASM_C6X_STRING_H */
diff --git a/arch/c6x/include/asm/swab.h b/arch/c6x/include/asm/swab.h
new file mode 100644 (file)
index 0000000..fd4bb05
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SWAB_H
+#define _ASM_C6X_SWAB_H
+
+static inline __attribute_const__ __u16 __c6x_swab16(__u16 val)
+{
+       asm("swap4 .l1 %0,%0\n" : "+a"(val));
+       return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swab32(__u32 val)
+{
+       asm("swap4 .l1 %0,%0\n"
+           "swap2 .l1 %0,%0\n"
+           : "+a"(val));
+       return val;
+}
+
+static inline __attribute_const__ __u64 __c6x_swab64(__u64 val)
+{
+       asm("   swap2 .s1 %p0,%P0\n"
+           "|| swap2 .l1 %P0,%p0\n"
+           "   swap4 .l1 %p0,%p0\n"
+           "   swap4 .l1 %P0,%P0\n"
+           : "+a"(val));
+       return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swahw32(__u32 val)
+{
+       asm("swap2 .l1 %0,%0\n" : "+a"(val));
+       return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swahb32(__u32 val)
+{
+       asm("swap4 .l1 %0,%0\n" : "+a"(val));
+       return val;
+}
+
+#define __arch_swab16 __c6x_swab16
+#define __arch_swab32 __c6x_swab32
+#define __arch_swab64 __c6x_swab64
+#define __arch_swahw32 __c6x_swahw32
+#define __arch_swahb32 __c6x_swahb32
+
+#endif /* _ASM_C6X_SWAB_H */
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..ae2be31
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_C6X_SYSCALL_H
+#define __ASM_C6X_SYSCALL_H
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int syscall_get_nr(struct task_struct *task,
+                                struct pt_regs *regs)
+{
+       return regs->b0;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
+{
+       return IS_ERR_VALUE(regs->a4) ? regs->a4 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->a4;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       regs->a4 = error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs, unsigned int i,
+                                        unsigned int n, unsigned long *args)
+{
+       switch (i) {
+       case 0:
+               if (!n--)
+                       break;
+               *args++ = regs->a4;
+       case 1:
+               if (!n--)
+                       break;
+               *args++ = regs->b4;
+       case 2:
+               if (!n--)
+                       break;
+               *args++ = regs->a6;
+       case 3:
+               if (!n--)
+                       break;
+               *args++ = regs->b6;
+       case 4:
+               if (!n--)
+                       break;
+               *args++ = regs->a8;
+       case 5:
+               if (!n--)
+                       break;
+               *args++ = regs->b8;
+       case 6:
+               if (!n--)
+                       break;
+       default:
+               BUG();
+       }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        const unsigned long *args)
+{
+       switch (i) {
+       case 0:
+               if (!n--)
+                       break;
+               regs->a4 = *args++;
+       case 1:
+               if (!n--)
+                       break;
+               regs->b4 = *args++;
+       case 2:
+               if (!n--)
+                       break;
+               regs->a6 = *args++;
+       case 3:
+               if (!n--)
+                       break;
+               regs->b6 = *args++;
+       case 4:
+               if (!n--)
+                       break;
+               regs->a8 = *args++;
+       case 5:
+               if (!n--)
+                       break;
+               regs->b8 = *args++;
+       case 6:
+               if (!n)
+                       break;
+       default:
+               BUG();
+       }
+}
+
+#endif /* __ASM_C6X_SYSCALL_H */
diff --git a/arch/c6x/include/asm/syscalls.h b/arch/c6x/include/asm/syscalls.h
new file mode 100644 (file)
index 0000000..aed53da
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ASM_C6X_SYSCALLS_H
+#define __ASM_C6X_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+
+/* The following are trampolines in entry.S to handle 64-bit arguments */
+extern long sys_pread_c6x(unsigned int fd, char __user *buf,
+                         size_t count, off_t pos_low, off_t pos_high);
+extern long sys_pwrite_c6x(unsigned int fd, const char __user *buf,
+                          size_t count, off_t pos_low, off_t pos_high);
+extern long sys_truncate64_c6x(const char __user *path,
+                              off_t length_low, off_t length_high);
+extern long sys_ftruncate64_c6x(unsigned int fd,
+                              off_t length_low, off_t length_high);
+extern long sys_fadvise64_c6x(int fd, u32 offset_lo, u32 offset_hi,
+                             u32 len, int advice);
+extern long sys_fadvise64_64_c6x(int fd, u32 offset_lo, u32 offset_hi,
+                               u32 len_lo, u32 len_hi, int advice);
+extern long sys_fallocate_c6x(int fd, int mode,
+                             u32 offset_lo, u32 offset_hi,
+                             u32 len_lo, u32 len_hi);
+extern int sys_cache_sync(unsigned long s, unsigned long e);
+
+struct pt_regs;
+
+extern asmlinkage long sys_c6x_clone(struct pt_regs *regs);
+extern asmlinkage long sys_c6x_execve(const char __user *name,
+                                     const char __user *const __user *argv,
+                                     const char __user *const __user *envp,
+                                     struct pt_regs *regs);
+
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_C6X_SYSCALLS_H */
diff --git a/arch/c6x/include/asm/system.h b/arch/c6x/include/asm/system.h
new file mode 100644 (file)
index 0000000..e076dc0
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SYSTEM_H
+#define _ASM_C6X_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+#define prepare_to_switch()    do { } while (0)
+
+struct task_struct;
+struct thread_struct;
+asmlinkage void *__switch_to(struct thread_struct *prev,
+                            struct thread_struct *next,
+                            struct task_struct *tsk);
+
+#define switch_to(prev, next, last)                            \
+       do {                                                    \
+               current->thread.wchan = (u_long) __builtin_return_address(0); \
+               (last) = __switch_to(&(prev)->thread,           \
+                                    &(next)->thread, (prev));  \
+               mb();                                           \
+               current->thread.wchan = 0;                      \
+       } while (0)
+
+/* Reset the board */
+#define HARD_RESET_NOW()
+
+#define get_creg(reg) \
+       ({ unsigned int __x; \
+          asm volatile ("mvc .s2 " #reg ",%0\n" : "=b"(__x)); __x; })
+
+#define set_creg(reg, v) \
+       do { unsigned int __x = (unsigned int)(v); \
+               asm volatile ("mvc .s2 %0," #reg "\n" : : "b"(__x)); \
+       } while (0)
+
+#define or_creg(reg, n) \
+       do { unsigned __x, __n = (unsigned)(n);           \
+               asm volatile ("mvc .s2 " #reg ",%0\n"     \
+                             "or  .l2 %1,%0,%0\n"        \
+                             "mvc .s2 %0," #reg "\n"     \
+                             "nop\n"                     \
+                             : "=&b"(__x) : "b"(__n));   \
+       } while (0)
+
+#define and_creg(reg, n) \
+       do { unsigned __x, __n = (unsigned)(n);           \
+               asm volatile ("mvc .s2 " #reg ",%0\n"     \
+                             "and .l2 %1,%0,%0\n"        \
+                             "mvc .s2 %0," #reg "\n"     \
+                             "nop\n"    \
+                             : "=&b"(__x) : "b"(__n));   \
+       } while (0)
+
+#define get_coreid() (get_creg(DNUM) & 0xff)
+
+/* Set/get IST */
+#define set_ist(x)     set_creg(ISTP, x)
+#define get_ist()       get_creg(ISTP)
+
+/*
+ * Exception management
+ */
+asmlinkage void enable_exception(void);
+#define disable_exception()
+#define get_except_type()        get_creg(EFR)
+#define ack_exception(type)      set_creg(ECR, 1 << (type))
+#define get_iexcept()            get_creg(IERR)
+#define set_iexcept(mask)        set_creg(IERR, (mask))
+
/*
 * Misc. functions
 */
/*
 * No trailing semicolon: a statement-like macro must expand to a single
 * statement-expression so that "if (x) nop(); else ..." parses correctly.
 * (The original definition ended in ';', which breaks such callers.)
 */
#define nop()                    asm("NOP\n")
/* All memory barriers reduce to barrier() on this architecture. */
#define mb()                     barrier()
#define rmb()                    barrier()
#define wmb()                    barrier()
#define set_mb(var, value)       do { var = value;  mb(); } while (0)
#define set_wmb(var, value)      do { var = value; wmb(); } while (0)
+
+#define smp_mb()                barrier()
+#define smp_rmb()               barrier()
+#define smp_wmb()               barrier()
+#define smp_read_barrier_depends()     do { } while (0)
+
+#define xchg(ptr, x) \
+       ((__typeof__(*(ptr)))__xchg((unsigned int)(x), (void *) (ptr), \
+                                   sizeof(*(ptr))))
+#define tas(ptr)    xchg((ptr), 1)
+
+unsigned int _lmbd(unsigned int, unsigned int);
+unsigned int _bitr(unsigned int);
+
+struct __xchg_dummy { unsigned int a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
/*
 * Atomically exchange the 1, 2 or 4 byte object at *ptr with x and
 * return the old value.  Uniprocessor implementation: atomicity is
 * obtained by disabling interrupts around the read-modify-write.
 *
 * Fix vs. original: tmp is now initialized at its declaration, so an
 * unsupported size no longer returns an uninitialized value (UB); the
 * redundant "tmp = 0;" dead stores inside each case are removed.
 */
static inline unsigned int __xchg(unsigned int x, volatile void *ptr, int size)
{
	unsigned int tmp = 0;	/* defined result even for an unsupported size */
	unsigned long flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		tmp = *((unsigned char *) ptr);
		*((unsigned char *) ptr) = (unsigned char) x;
		break;
	case 2:
		tmp = *((unsigned short *) ptr);
		*((unsigned short *) ptr) = (unsigned short) x;
		break;
	case 4:
		tmp = *((unsigned int *) ptr);
		*((unsigned int *) ptr) = x;
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)                                       \
+       ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),             \
+                                                    (unsigned long)(o), \
+                                                    (unsigned long)(n), \
+                                                    sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#include <asm-generic/cmpxchg.h>
+
+#define _extu(x, s, e)                                                 \
+       ({      unsigned int __x;                                       \
+               asm volatile ("extu .S2 %3,%1,%2,%0\n" :                \
+                             "=b"(__x) : "n"(s), "n"(e), "b"(x));      \
+              __x; })
+
+
+extern unsigned int c6x_core_freq;
+
+struct pt_regs;
+
+extern void die(char *str, struct pt_regs *fp, int nr);
+extern asmlinkage int process_exception(struct pt_regs *regs);
+extern void time_init(void);
+extern void free_initmem(void);
+
+extern void (*c6x_restart)(void);
+extern void (*c6x_halt)(void);
+
+#endif /* _ASM_C6X_SYSTEM_H */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
new file mode 100644 (file)
index 0000000..fd99148
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.3x: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_THREAD_INFO_H
+#define _ASM_C6X_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_SIZE            4096
+#define THREAD_SHIFT           12
+#define THREAD_ORDER           0
+#else
+#define THREAD_SIZE            8192
+#define THREAD_SHIFT           13
+#define THREAD_ORDER           1
+#endif
+
+#define THREAD_START_SP                (THREAD_SIZE - 8)
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+       struct task_struct      *task;          /* main task structure */
+       struct exec_domain      *exec_domain;   /* execution domain */
+       unsigned long           flags;          /* low level flags */
+       int                     cpu;            /* cpu we're on */
+       int                     preempt_count;  /* 0 = preemptable, <0 = BUG */
+       mm_segment_t            addr_limit;     /* thread address space */
+       struct restart_block    restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .task           = &tsk,                 \
+       .exec_domain    = &default_exec_domain, \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
+       .preempt_count  = INIT_PREEMPT_COUNT,   \
+       .addr_limit     = KERNEL_DS,            \
+       .restart_block  = {                     \
+               .fn = do_no_restart_syscall,    \
+       },                                      \
+}
+
+#define init_thread_info       (init_thread_union.thread_info)
+#define init_stack             (init_thread_union.stack)
+
+/*
+ * Get the thread_info of the current task.
+ *
+ * thread_info lives at the bottom of the THREAD_SIZE-aligned kernel
+ * stack, so it is located by clearing the low bits 0..THREAD_SHIFT-1
+ * of the stack pointer (register B15) with the "clr" instruction.
+ */
+static inline __attribute__((const))
+struct thread_info *current_thread_info(void)
+{
+       struct thread_info *ti;
+       asm volatile (" clr   .s2 B15,0,%1,%0\n"
+                     : "=b" (ti)
+                     : "Iu5" (THREAD_SHIFT - 1));
+       return ti;
+}
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#else
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
+#endif
+
+#define alloc_thread_info_node(tsk, node)      \
+       ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+
+#define free_thread_info(ti)   free_pages((unsigned long) (ti), THREAD_ORDER)
+#define get_thread_info(ti)    get_task_struct((ti)->task)
+#define put_thread_info(ti)    put_task_struct((ti)->task)
+#endif /* __ASSEMBLY__ */
+
+#define        PREEMPT_ACTIVE  0x10000000
+
+/*
+ * thread information flag bit numbers
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE      0       /* syscall trace active */
+#define TIF_NOTIFY_RESUME      1       /* resumption notification requested */
+#define TIF_SIGPENDING         2       /* signal pending */
+#define TIF_NEED_RESCHED       3       /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK    4       /* restore signal mask in do_signal() */
+
+#define TIF_POLLING_NRFLAG     16      /* true if polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE             17      /* OOM killer killed process */
+
+#define TIF_WORK_MASK          0x00007FFE /* work on irq/exception return */
+#define TIF_ALLWORK_MASK       0x00007FFF /* work on any return to u-space */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_C6X_THREAD_INFO_H */
diff --git a/arch/c6x/include/asm/timer64.h b/arch/c6x/include/asm/timer64.h
new file mode 100644 (file)
index 0000000..bbe27bb
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _C6X_TIMER64_H
+#define _C6X_TIMER64_H
+
+extern void __init timer64_init(void);
+
+#endif /* _C6X_TIMER64_H */
diff --git a/arch/c6x/include/asm/timex.h b/arch/c6x/include/asm/timex.h
new file mode 100644 (file)
index 0000000..508c3ec
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Modified for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_TIMEX_H
+#define _ASM_C6X_TIMEX_H
+
+#define CLOCK_TICK_RATE ((1000 * 1000000UL) / 6)
+
+/* 64-bit timestamp */
+typedef unsigned long long cycles_t;
+
+/*
+ * Read the free-running 64-bit timestamp counter (TSCL/TSCH).
+ *
+ * Interrupts are disabled (dint ... rint) around the two control
+ * register reads so the low and high halves are sampled as a
+ * consistent pair.  NOTE(review): this relies on TSCH holding the
+ * value latched by the TSCL read -- confirm against the C64x+ CPU
+ * reference guide.
+ */
+static inline cycles_t get_cycles(void)
+{
+       unsigned l, h;
+
+       asm volatile (" dint\n"
+                     " mvc .s2 TSCL,%0\n"
+                     " mvc .s2 TSCH,%1\n"
+                     " rint\n"
+                     : "=b"(l), "=b"(h));
+       return ((cycles_t)h << 32) | l;
+}
+
+#endif /* _ASM_C6X_TIMEX_H */
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
new file mode 100644 (file)
index 0000000..8709e5e
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _ASM_C6X_TLB_H
+#define _ASM_C6X_TLB_H
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_C6X_TLB_H */
diff --git a/arch/c6x/include/asm/traps.h b/arch/c6x/include/asm/traps.h
new file mode 100644 (file)
index 0000000..62124d7
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_TRAPS_H
+#define _ASM_C6X_TRAPS_H
+
+#define EXCEPT_TYPE_NXF   31      /* NMI */
+#define EXCEPT_TYPE_EXC   30      /* external exception */
+#define EXCEPT_TYPE_IXF   1       /* internal exception */
+#define EXCEPT_TYPE_SXF   0       /* software exception */
+
+#define EXCEPT_CAUSE_LBX  (1 << 7) /* loop buffer exception */
+#define EXCEPT_CAUSE_PRX  (1 << 6) /* privilege exception */
+#define EXCEPT_CAUSE_RAX  (1 << 5) /* resource access exception */
+#define EXCEPT_CAUSE_RCX  (1 << 4) /* resource conflict exception */
+#define EXCEPT_CAUSE_OPX  (1 << 3) /* opcode exception */
+#define EXCEPT_CAUSE_EPX  (1 << 2) /* execute packet exception */
+#define EXCEPT_CAUSE_FPX  (1 << 1) /* fetch packet exception */
+#define EXCEPT_CAUSE_IFX  (1 << 0) /* instruction fetch exception */
+
+struct exception_info {
+       char *kernel_str;
+       int  signo;
+       int  code;
+};
+
+extern int (*c6x_nmi_handler)(struct pt_regs *regs);
+
+#endif /* _ASM_C6X_TRAPS_H */
diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h
new file mode 100644 (file)
index 0000000..453dd26
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_UACCESS_H
+#define _ASM_C6X_UACCESS_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+
+#ifdef CONFIG_ACCESS_CHECK
+#define __access_ok _access_ok
+#endif
+
+/*
+ * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h
+ *
+ * C6X supports unaligned 32 and 64 bit loads and stores.
+ */
+/*
+ * Copy n bytes from user space to kernel space.
+ *
+ * Access checking is done by the asm-generic/uaccess.h wrappers; this
+ * helper only performs the copy and always returns 0 (no fault
+ * handling here).  Constant sizes of 1, 4 and 8 bytes are open-coded
+ * because C6x supports unaligned 32/64-bit loads and stores (ldnw,
+ * ldndw); everything else falls back to memcpy().
+ */
+static inline __must_check long __copy_from_user(void *to,
+               const void __user *from, unsigned long n)
+{
+       u32 tmp32;
+       u64 tmp64;
+
+       if (__builtin_constant_p(n)) {
+               switch (n) {
+               case 1:
+                       *(u8 *)to = *(u8 __force *)from;
+                       return 0;
+               case 4:
+                       /*
+                        * NOTE(review): the destination constraint is "A"
+                        * here, while the 8-byte case and __copy_to_user()
+                        * use "a" -- verify this is intentional, not a typo.
+                        */
+                       asm volatile ("ldnw .d1t1 *%2,%0\n"
+                                     "nop  4\n"
+                                     "stnw .d1t1 %0,*%1\n"
+                                     : "=&a"(tmp32)
+                                     : "A"(to), "a"(from)
+                                     : "memory");
+                       return 0;
+               case 8:
+                       asm volatile ("ldndw .d1t1 *%2,%0\n"
+                                     "nop   4\n"
+                                     "stndw .d1t1 %0,*%1\n"
+                                     : "=&a"(tmp64)
+                                     : "a"(to), "a"(from)
+                                     : "memory");
+                       return 0;
+               default:
+                       break;
+               }
+       }
+
+       memcpy(to, (const void __force *)from, n);
+       return 0;
+}
+
+static inline __must_check long __copy_to_user(void __user *to,
+               const void *from, unsigned long n)
+{
+       u32 tmp32;
+       u64 tmp64;
+
+       if (__builtin_constant_p(n)) {
+               switch (n) {
+               case 1:
+                       *(u8 __force *)to = *(u8 *)from;
+                       return 0;
+               case 4:
+                       asm volatile ("ldnw .d1t1 *%2,%0\n"
+                                     "nop  4\n"
+                                     "stnw .d1t1 %0,*%1\n"
+                                     : "=&a"(tmp32)
+                                     : "a"(to), "a"(from)
+                                     : "memory");
+                       return 0;
+               case 8:
+                       asm volatile ("ldndw .d1t1 *%2,%0\n"
+                                     "nop   4\n"
+                                     "stndw .d1t1 %0,*%1\n"
+                                     : "=&a"(tmp64)
+                                     : "a"(to), "a"(from)
+                                     : "memory");
+                       return 0;
+               default:
+                       break;
+               }
+       }
+
+       memcpy((void __force *)to, from, n);
+       return 0;
+}
+
+#define __copy_to_user   __copy_to_user
+#define __copy_from_user __copy_from_user
+
+extern int _access_ok(unsigned long addr, unsigned long size);
+#ifdef CONFIG_ACCESS_CHECK
+#define __access_ok _access_ok
+#endif
+
+#include <asm-generic/uaccess.h>
+
+#endif /* _ASM_C6X_UACCESS_H */
diff --git a/arch/c6x/include/asm/unaligned.h b/arch/c6x/include/asm/unaligned.h
new file mode 100644 (file)
index 0000000..b976cb7
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *  Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_UNALIGNED_H
+#define _ASM_C6X_UNALIGNED_H
+
+#include <linux/swab.h>
+
+/*
+ * The C64x+ can do unaligned word and dword accesses in hardware
+ * using special load/store instructions.
+ */
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       const u8 *_p = p;
+       return _p[0] | _p[1] << 8;
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       const u8 *_p = p;
+       return _p[0] << 8 | _p[1];
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       u8 *_p = p;
+       _p[0] = val;
+       _p[1] = val >> 8;
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+       u8 *_p = p;
+       _p[0] = val >> 8;
+       _p[1] = val;
+}
+
+/*
+ * Read a 32-bit value from a possibly unaligned address using the
+ * C64x+ non-aligned load (ldnw).  The pointer is passed into the
+ * register as an integer and overwritten by the loaded value (the
+ * "+a" in/out operand); pointers are 32 bits on this architecture.
+ *
+ * NOTE(review): the asm is neither volatile nor does it clobber
+ * "memory", so the compiler may reorder it relative to stores --
+ * verify no caller depends on ordering.
+ */
+static inline u32 get_unaligned32(const void *p)
+{
+       u32 val = (u32) p;
+       asm (" ldnw     .d1t1   *%0,%0\n"
+            " nop     4\n"
+            : "+a"(val));
+       return val;
+}
+
+/*
+ * Store a 32-bit value to a possibly unaligned address using the
+ * C64x+ non-aligned store (stnw); "memory" clobber keeps the store
+ * ordered with surrounding accesses.
+ */
+static inline void put_unaligned32(u32 val, void *p)
+{
+       asm volatile (" stnw    .d2t1   %0,*%1\n"
+                     : : "a"(val), "b"(p) : "memory");
+}
+
+/*
+ * Read a 64-bit value from a possibly unaligned address using the
+ * C64x+ non-aligned double-word load (ldndw).
+ */
+static inline u64 get_unaligned64(const void *p)
+{
+       u64 val;
+       asm volatile (" ldndw   .d1t1   *%1,%0\n"
+                     " nop     4\n"
+                     : "=a"(val) : "a"(p));
+       return val;
+}
+
+static inline void put_unaligned64(u64 val, const void *p)
+{
+       asm volatile (" stndw   .d2t1   %0,*%1\n"
+                     : : "a"(val), "b"(p) : "memory");
+}
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+#define get_unaligned_le32(p)   __swab32(get_unaligned32(p))
+#define get_unaligned_le64(p)   __swab64(get_unaligned64(p))
+#define get_unaligned_be32(p)   get_unaligned32(p)
+#define get_unaligned_be64(p)   get_unaligned64(p)
+#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
+#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
+#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
+#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
+
+#else
+
+#define get_unaligned_le32(p)   get_unaligned32(p)
+#define get_unaligned_le64(p)   get_unaligned64(p)
+#define get_unaligned_be32(p)   __swab32(get_unaligned32(p))
+#define get_unaligned_be64(p)   __swab64(get_unaligned64(p))
+#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
+#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
+#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
+#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
+
+#endif
+
+/*
+ * Cause a link-time error if we try an unaligned access other than
+ * 1,2,4 or 8 bytes long
+ */
+extern int __bad_unaligned_access_size(void);
+
+#define __get_unaligned_le(ptr) (typeof(*(ptr)))({                     \
+       sizeof(*(ptr)) == 1 ? *(ptr) :                                  \
+         (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) :            \
+            (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) :         \
+               (sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) :      \
+                  __bad_unaligned_access_size())));                    \
+       })
+
+#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({     \
+       sizeof(*(ptr)) == 1 ? *(ptr) :                                  \
+         (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) :            \
+            (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) :         \
+               (sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) :      \
+                  __bad_unaligned_access_size())));                    \
+       })
+
+#define __put_unaligned_le(val, ptr) ({                                        \
+       void *__gu_p = (ptr);                                           \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               *(u8 *)__gu_p = (__force u8)(val);                      \
+               break;                                                  \
+       case 2:                                                         \
+               put_unaligned_le16((__force u16)(val), __gu_p);         \
+               break;                                                  \
+       case 4:                                                         \
+               put_unaligned_le32((__force u32)(val), __gu_p);         \
+               break;                                                  \
+       case 8:                                                         \
+               put_unaligned_le64((__force u64)(val), __gu_p);         \
+               break;                                                  \
+       default:                                                        \
+               __bad_unaligned_access_size();                          \
+               break;                                                  \
+       }                                                               \
+       (void)0; })
+
+#define __put_unaligned_be(val, ptr) ({                                        \
+       void *__gu_p = (ptr);                                           \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               *(u8 *)__gu_p = (__force u8)(val);                      \
+               break;                                                  \
+       case 2:                                                         \
+               put_unaligned_be16((__force u16)(val), __gu_p);         \
+               break;                                                  \
+       case 4:                                                         \
+               put_unaligned_be32((__force u32)(val), __gu_p);         \
+               break;                                                  \
+       case 8:                                                         \
+               put_unaligned_be64((__force u64)(val), __gu_p);         \
+               break;                                                  \
+       default:                                                        \
+               __bad_unaligned_access_size();                          \
+               break;                                                  \
+       }                                                               \
+       (void)0; })
+
+#endif /* _ASM_C6X_UNALIGNED_H */
diff --git a/arch/c6x/include/asm/unistd.h b/arch/c6x/include/asm/unistd.h
new file mode 100644 (file)
index 0000000..6d54ea4
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Based on arch/tile version.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT. See the GNU General Public License for
+ *   more details.
+ */
+#if !defined(_ASM_C6X_UNISTD_H) || defined(__SYSCALL)
+#define _ASM_C6X_UNISTD_H
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+/* C6X-specific syscalls. */
+#define __NR_cache_sync        (__NR_arch_specific_syscall + 0)
+__SYSCALL(__NR_cache_sync, sys_cache_sync)
+
+#endif /* _ASM_C6X_UNISTD_H */
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
new file mode 100644 (file)
index 0000000..580a515
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for arch/c6x/kernel/
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := process.o traps.o irq.o signal.o ptrace.o
+obj-y += setup.o sys_c6x.o time.o devicetree.o
+obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
+obj-y += soc.o dma.o
+
+obj-$(CONFIG_MODULES)           += module.o
diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c
new file mode 100644 (file)
index 0000000..759ad6d
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <asm/procinfo.h>
+#include <linux/kbuild.h>
+#include <linux/unistd.h>
+
+/*
+ * Emit assembler-visible constants (struct offsets, sizes and flag
+ * masks) via the OFFSET/DEFINE kbuild macros; the raw asm output is
+ * post-processed into asm-offsets.h for use by entry/switch assembly.
+ */
+void foo(void)
+{
+       /* pt_regs offsets: caller-visible A/B register file */
+       OFFSET(REGS_A16,        pt_regs, a16);
+       OFFSET(REGS_A17,        pt_regs, a17);
+       OFFSET(REGS_A18,        pt_regs, a18);
+       OFFSET(REGS_A19,        pt_regs, a19);
+       OFFSET(REGS_A20,        pt_regs, a20);
+       OFFSET(REGS_A21,        pt_regs, a21);
+       OFFSET(REGS_A22,        pt_regs, a22);
+       OFFSET(REGS_A23,        pt_regs, a23);
+       OFFSET(REGS_A24,        pt_regs, a24);
+       OFFSET(REGS_A25,        pt_regs, a25);
+       OFFSET(REGS_A26,        pt_regs, a26);
+       OFFSET(REGS_A27,        pt_regs, a27);
+       OFFSET(REGS_A28,        pt_regs, a28);
+       OFFSET(REGS_A29,        pt_regs, a29);
+       OFFSET(REGS_A30,        pt_regs, a30);
+       OFFSET(REGS_A31,        pt_regs, a31);
+
+       OFFSET(REGS_B16,        pt_regs, b16);
+       OFFSET(REGS_B17,        pt_regs, b17);
+       OFFSET(REGS_B18,        pt_regs, b18);
+       OFFSET(REGS_B19,        pt_regs, b19);
+       OFFSET(REGS_B20,        pt_regs, b20);
+       OFFSET(REGS_B21,        pt_regs, b21);
+       OFFSET(REGS_B22,        pt_regs, b22);
+       OFFSET(REGS_B23,        pt_regs, b23);
+       OFFSET(REGS_B24,        pt_regs, b24);
+       OFFSET(REGS_B25,        pt_regs, b25);
+       OFFSET(REGS_B26,        pt_regs, b26);
+       OFFSET(REGS_B27,        pt_regs, b27);
+       OFFSET(REGS_B28,        pt_regs, b28);
+       OFFSET(REGS_B29,        pt_regs, b29);
+       OFFSET(REGS_B30,        pt_regs, b30);
+       OFFSET(REGS_B31,        pt_regs, b31);
+
+       OFFSET(REGS_A0,         pt_regs, a0);
+       OFFSET(REGS_A1,         pt_regs, a1);
+       OFFSET(REGS_A2,         pt_regs, a2);
+       OFFSET(REGS_A3,         pt_regs, a3);
+       OFFSET(REGS_A4,         pt_regs, a4);
+       OFFSET(REGS_A5,         pt_regs, a5);
+       OFFSET(REGS_A6,         pt_regs, a6);
+       OFFSET(REGS_A7,         pt_regs, a7);
+       OFFSET(REGS_A8,         pt_regs, a8);
+       OFFSET(REGS_A9,         pt_regs, a9);
+       OFFSET(REGS_A10,        pt_regs, a10);
+       OFFSET(REGS_A11,        pt_regs, a11);
+       OFFSET(REGS_A12,        pt_regs, a12);
+       OFFSET(REGS_A13,        pt_regs, a13);
+       OFFSET(REGS_A14,        pt_regs, a14);
+       OFFSET(REGS_A15,        pt_regs, a15);
+
+       OFFSET(REGS_B0,         pt_regs, b0);
+       OFFSET(REGS_B1,         pt_regs, b1);
+       OFFSET(REGS_B2,         pt_regs, b2);
+       OFFSET(REGS_B3,         pt_regs, b3);
+       OFFSET(REGS_B4,         pt_regs, b4);
+       OFFSET(REGS_B5,         pt_regs, b5);
+       OFFSET(REGS_B6,         pt_regs, b6);
+       OFFSET(REGS_B7,         pt_regs, b7);
+       OFFSET(REGS_B8,         pt_regs, b8);
+       OFFSET(REGS_B9,         pt_regs, b9);
+       OFFSET(REGS_B10,        pt_regs, b10);
+       OFFSET(REGS_B11,        pt_regs, b11);
+       OFFSET(REGS_B12,        pt_regs, b12);
+       OFFSET(REGS_B13,        pt_regs, b13);
+       OFFSET(REGS_DP,         pt_regs, dp);
+       OFFSET(REGS_SP,         pt_regs, sp);
+
+       OFFSET(REGS_TSR,        pt_regs, tsr);
+       OFFSET(REGS_ORIG_A4,    pt_regs, orig_a4);
+
+       DEFINE(REGS__END,       sizeof(struct pt_regs));
+       BLANK();
+
+       /* thread_struct offsets (context saved across switch_to) */
+       OFFSET(THREAD_PC,       thread_struct, pc);
+       OFFSET(THREAD_B15_14,   thread_struct, b15_14);
+       OFFSET(THREAD_A15_14,   thread_struct, a15_14);
+       OFFSET(THREAD_B13_12,   thread_struct, b13_12);
+       OFFSET(THREAD_A13_12,   thread_struct, a13_12);
+       OFFSET(THREAD_B11_10,   thread_struct, b11_10);
+       OFFSET(THREAD_A11_10,   thread_struct, a11_10);
+       OFFSET(THREAD_RICL_ICL, thread_struct, ricl_icl);
+       BLANK();
+
+       OFFSET(TASK_STATE,      task_struct, state);
+       BLANK();
+
+       OFFSET(THREAD_INFO_FLAGS,       thread_info, flags);
+       OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
+       BLANK();
+
+       /* These would be unnecessary if we ran asm files
+        * through the preprocessor.
+        */
+       DEFINE(KTHREAD_SIZE, THREAD_SIZE);
+       DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
+       DEFINE(KTHREAD_START_SP, THREAD_START_SP);
+       DEFINE(ENOSYS_, ENOSYS);
+       DEFINE(NR_SYSCALLS_, __NR_syscalls);
+
+       /* Single-bit masks derived from the TIF_* bit numbers */
+       DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
+       DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
+       DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
+       DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
+       DEFINE(_TIF_POLLING_NRFLAG, (1<<TIF_POLLING_NRFLAG));
+
+       DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
+       DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
+}
diff --git a/arch/c6x/kernel/c6x_ksyms.c b/arch/c6x/kernel/c6x_ksyms.c
new file mode 100644 (file)
index 0000000..0ba3e0b
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <asm/checksum.h>
+#include <linux/io.h>
+
+/*
+ * libgcc functions - used internally by the compiler...
+ */
+extern int __c6xabi_divi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divi);
+
+extern unsigned __c6xabi_divu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divu);
+
+extern int __c6xabi_remi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_remi);
+
+extern unsigned __c6xabi_remu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_remu);
+
+extern int __c6xabi_divremi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divremi);
+
+extern unsigned __c6xabi_divremu(unsigned  dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divremu);
+
+extern unsigned long long __c6xabi_mpyll(unsigned long long src1,
+                                        unsigned long long src2);
+EXPORT_SYMBOL(__c6xabi_mpyll);
+
+extern long long __c6xabi_negll(long long src);
+EXPORT_SYMBOL(__c6xabi_negll);
+
+extern unsigned long long __c6xabi_llshl(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshl);
+
+extern long long __c6xabi_llshr(long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshr);
+
+extern unsigned long long __c6xabi_llshru(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshru);
+
+extern void __c6xabi_strasgi(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi);
+
+extern void __c6xabi_push_rts(void);
+EXPORT_SYMBOL(__c6xabi_push_rts);
+
+extern void __c6xabi_pop_rts(void);
+EXPORT_SYMBOL(__c6xabi_pop_rts);
+
+extern void __c6xabi_strasgi_64plus(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi_64plus);
+
+/* lib functions */
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
new file mode 100644 (file)
index 0000000..bdb56f0
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ *  Architecture specific OF callbacks.
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+void __init early_init_devtree(void *params)
+{
+       /* Setup flat device-tree pointer */
+       initial_boot_params = params;
+
+       /* Retrieve various information from the /chosen node of the
+        * device-tree, including the platform type, initrd location and
+        * size and more ...
+        */
+       of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);
+
+       /* Scan memory nodes and rebuild MEMBLOCKs */
+       of_scan_flat_dt(early_init_dt_scan_root, NULL);
+       of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+               unsigned long end)
+{
+       initrd_start = (unsigned long)__va(start);
+       initrd_end = (unsigned long)__va(end);
+       initrd_below_start_ok = 1;
+}
+#endif
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+       c6x_add_memory(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+       return __va(memblock_alloc(size, align));
+}
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
new file mode 100644 (file)
index 0000000..ab7b12d
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+static void c6x_dma_sync(dma_addr_t handle, size_t size,
+                        enum dma_data_direction dir)
+{
+       unsigned long paddr = handle;
+
+       BUG_ON(!valid_dma_direction(dir));
+
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+               L2_cache_block_invalidate(paddr, paddr + size);
+               break;
+       case DMA_TO_DEVICE:
+               L2_cache_block_writeback(paddr, paddr + size);
+               break;
+       case DMA_BIDIRECTIONAL:
+               L2_cache_block_writeback_invalidate(paddr, paddr + size);
+               break;
+       default:
+               break;
+       }
+}
+
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+                         enum dma_data_direction dir)
+{
+       dma_addr_t addr = virt_to_phys(ptr);
+
+       c6x_dma_sync(addr, size, dir);
+
+       debug_dma_map_page(dev, virt_to_page(ptr),
+                          (unsigned long)ptr & ~PAGE_MASK, size,
+                          dir, addr, true);
+       return addr;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+
+void dma_unmap_single(struct device *dev, dma_addr_t handle,
+                     size_t size, enum dma_data_direction dir)
+{
+       c6x_dma_sync(handle, size, dir);
+
+       debug_dma_unmap_page(dev, handle, size, dir, true);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
+
+int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+              int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i)
+               sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
+                                                dir);
+
+       debug_dma_map_sg(dev, sglist, nents, nents, dir);
+
+       return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                 int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i)
+               dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+
+       debug_dma_unmap_sg(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+                            size_t size, enum dma_data_direction dir)
+{
+       c6x_dma_sync(handle, size, dir);
+
+       debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+                               size_t size, enum dma_data_direction dir)
+{
+       c6x_dma_sync(handle, size, dir);
+
+       debug_dma_sync_single_for_device(dev, handle, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+                        int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i)
+               dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                                       sg->length, dir);
+
+       debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+                           int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i)
+               dma_sync_single_for_device(dev, sg_dma_address(sg),
+                                          sg->length, dir);
+
+       debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+       return 0;
+}
+fs_initcall(dma_init);
diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S
new file mode 100644 (file)
index 0000000..3e977cc
--- /dev/null
@@ -0,0 +1,803 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004-2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
+;  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+; Registers naming
+#define DP     B14
+#define SP     B15
+
+#ifndef CONFIG_PREEMPT
+#define resume_kernel restore_all
+#endif
+
+       .altmacro
+
+       .macro MASK_INT reg
+       MVC     .S2     CSR,reg
+       CLR     .S2     reg,0,0,reg
+       MVC     .S2     reg,CSR
+       .endm
+
+       .macro UNMASK_INT reg
+       MVC     .S2     CSR,reg
+       SET     .S2     reg,0,0,reg
+       MVC     .S2     reg,CSR
+       .endm
+
+       .macro GET_THREAD_INFO reg
+       SHR     .S1X    SP,THREAD_SHIFT,reg
+       SHL     .S1     reg,THREAD_SHIFT,reg
+       .endm
+
+       ;;
+       ;;  This defines the normal kernel pt_regs layout.
+       ;;
+       .macro SAVE_ALL __rp __tsr
+       STW     .D2T2   B0,*SP--[2]             ; save original B0
+       MVKL    .S2     current_ksp,B0
+       MVKH    .S2     current_ksp,B0
+       LDW     .D2T2   *B0,B1                  ; KSP
+
+       NOP     3
+       STW     .D2T2   B1,*+SP[1]              ; save original B1
+       XOR     .D2     SP,B1,B0                ; (SP ^ KSP)
+       LDW     .D2T2   *+SP[1],B1              ; restore B0/B1
+       LDW     .D2T2   *++SP[2],B0
+       SHR     .S2     B0,THREAD_SHIFT,B0      ; 0 if already using kstack
+  [B0] STDW    .D2T2   SP:DP,*--B1[1]          ; user: save user sp/dp kstack
+  [B0] MV      .S2     B1,SP                   ;    and switch to kstack
+||[!B0] STDW   .D2T2   SP:DP,*--SP[1]          ; kernel: save on current stack
+
+       SUBAW   .D2     SP,2,SP
+
+       ADD     .D1X    SP,-8,A15
+ ||    STDW    .D2T1   A15:A14,*SP--[16]       ; save A15:A14
+
+       STDW    .D2T2   B13:B12,*SP--[1]
+ ||    STDW    .D1T1   A13:A12,*A15--[1]
+ ||    MVC     .S2     __rp,B13
+
+       STDW    .D2T2   B11:B10,*SP--[1]
+ ||    STDW    .D1T1   A11:A10,*A15--[1]
+ ||    MVC     .S2     CSR,B12
+
+       STDW    .D2T2   B9:B8,*SP--[1]
+ ||    STDW    .D1T1   A9:A8,*A15--[1]
+ ||    MVC     .S2     RILC,B11
+       STDW    .D2T2   B7:B6,*SP--[1]
+ ||    STDW    .D1T1   A7:A6,*A15--[1]
+ ||    MVC     .S2     ILC,B10
+
+       STDW    .D2T2   B5:B4,*SP--[1]
+ ||    STDW    .D1T1   A5:A4,*A15--[1]
+
+       STDW    .D2T2   B3:B2,*SP--[1]
+ ||    STDW    .D1T1   A3:A2,*A15--[1]
+ ||    MVC     .S2     __tsr,B5
+
+       STDW    .D2T2   B1:B0,*SP--[1]
+ ||    STDW    .D1T1   A1:A0,*A15--[1]
+ ||    MV      .S1X    B5,A5
+
+       STDW    .D2T2   B31:B30,*SP--[1]
+ ||    STDW    .D1T1   A31:A30,*A15--[1]
+       STDW    .D2T2   B29:B28,*SP--[1]
+ ||    STDW    .D1T1   A29:A28,*A15--[1]
+       STDW    .D2T2   B27:B26,*SP--[1]
+ ||    STDW    .D1T1   A27:A26,*A15--[1]
+       STDW    .D2T2   B25:B24,*SP--[1]
+ ||    STDW    .D1T1   A25:A24,*A15--[1]
+       STDW    .D2T2   B23:B22,*SP--[1]
+ ||    STDW    .D1T1   A23:A22,*A15--[1]
+       STDW    .D2T2   B21:B20,*SP--[1]
+ ||    STDW    .D1T1   A21:A20,*A15--[1]
+       STDW    .D2T2   B19:B18,*SP--[1]
+ ||    STDW    .D1T1   A19:A18,*A15--[1]
+       STDW    .D2T2   B17:B16,*SP--[1]
+ ||    STDW    .D1T1   A17:A16,*A15--[1]
+
+       STDW    .D2T2   B13:B12,*SP--[1]        ; save PC and CSR
+
+       STDW    .D2T2   B11:B10,*SP--[1]        ; save RILC and ILC
+       STDW    .D2T1   A5:A4,*SP--[1]          ; save TSR and orig A4
+
+       ;; We left an unused word on the stack just above pt_regs.
+       ;; It is used to save whether or not this frame is due to
+       ;; a syscall. It is cleared here, but the syscall handler
+       ;; sets it to a non-zero value.
+       MVK     .L2     0,B1
+       STW     .D2T2   B1,*+SP(REGS__END+8)    ; clear syscall flag
+       .endm
+
+       .macro RESTORE_ALL __rp __tsr
+       LDDW    .D2T2   *++SP[1],B9:B8          ; get TSR (B9)
+       LDDW    .D2T2   *++SP[1],B11:B10        ; get RILC (B11) and ILC (B10)
+       LDDW    .D2T2   *++SP[1],B13:B12        ; get PC (B13) and CSR (B12)
+
+       ADDAW   .D1X    SP,30,A15
+
+       LDDW    .D1T1   *++A15[1],A17:A16
+ ||    LDDW    .D2T2   *++SP[1],B17:B16
+       LDDW    .D1T1   *++A15[1],A19:A18
+ ||    LDDW    .D2T2   *++SP[1],B19:B18
+       LDDW    .D1T1   *++A15[1],A21:A20
+ ||    LDDW    .D2T2   *++SP[1],B21:B20
+       LDDW    .D1T1   *++A15[1],A23:A22
+ ||    LDDW    .D2T2   *++SP[1],B23:B22
+       LDDW    .D1T1   *++A15[1],A25:A24
+ ||    LDDW    .D2T2   *++SP[1],B25:B24
+       LDDW    .D1T1   *++A15[1],A27:A26
+ ||    LDDW    .D2T2   *++SP[1],B27:B26
+       LDDW    .D1T1   *++A15[1],A29:A28
+ ||    LDDW    .D2T2   *++SP[1],B29:B28
+       LDDW    .D1T1   *++A15[1],A31:A30
+ ||    LDDW    .D2T2   *++SP[1],B31:B30
+
+       LDDW    .D1T1   *++A15[1],A1:A0
+ ||    LDDW    .D2T2   *++SP[1],B1:B0
+
+       LDDW    .D1T1   *++A15[1],A3:A2
+ ||    LDDW    .D2T2   *++SP[1],B3:B2
+ ||    MVC     .S2     B9,__tsr
+       LDDW    .D1T1   *++A15[1],A5:A4
+ ||    LDDW    .D2T2   *++SP[1],B5:B4
+ ||    MVC     .S2     B11,RILC
+       LDDW    .D1T1   *++A15[1],A7:A6
+ ||    LDDW    .D2T2   *++SP[1],B7:B6
+ ||    MVC     .S2     B10,ILC
+
+       LDDW    .D1T1   *++A15[1],A9:A8
+ ||    LDDW    .D2T2   *++SP[1],B9:B8
+ ||    MVC     .S2     B13,__rp
+
+       LDDW    .D1T1   *++A15[1],A11:A10
+ ||    LDDW    .D2T2   *++SP[1],B11:B10
+ ||    MVC     .S2     B12,CSR
+
+       LDDW    .D1T1   *++A15[1],A13:A12
+ ||    LDDW    .D2T2   *++SP[1],B13:B12
+
+       MV      .D2X    A15,SP
+ ||    MVKL    .S1     current_ksp,A15
+       MVKH    .S1     current_ksp,A15
+ ||    ADDAW   .D1X    SP,6,A14
+       STW     .D1T1   A14,*A15        ; save kernel stack pointer
+
+       LDDW    .D2T1   *++SP[1],A15:A14
+
+       B       .S2     __rp            ; return from interrupt or exception
+       LDDW    .D2T2   *+SP[1],SP:DP
+       NOP     4
+       .endm
+
+       .section .text
+
+       ;;
+       ;; Jump to schedule() then return to ret_from_exception
+       ;;
+_reschedule:
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     schedule,A0
+       MVKH    .S1     schedule,A0
+       B       .S2X    A0
+#else
+       B       .S1     schedule
+#endif
+       ADDKPC  .S2     ret_from_exception,B3,4
+
+       ;;
+       ;; Called before syscall handler when process is being debugged
+       ;;
+tracesys_on:
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     syscall_trace_entry,A0
+       MVKH    .S1     syscall_trace_entry,A0
+       B       .S2X    A0
+#else
+       B       .S1     syscall_trace_entry
+#endif
+       ADDKPC  .S2     ret_from_syscall_trace,B3,3
+       ADD     .S1X    8,SP,A4
+
+ret_from_syscall_trace:
+       ;; tracing returns (possibly new) syscall number
+       MV      .D2X    A4,B0
+ ||    MVK     .S2     __NR_syscalls,B1
+       CMPLTU  .L2     B0,B1,B1
+
+ [!B1] BNOP    .S2     ret_from_syscall_function,5
+ ||    MVK     .S1     -ENOSYS,A4
+
+       ;; reload syscall args from (possibly modified) stack frame
+       ;; and get syscall handler addr from sys_call_table:
+       LDW     .D2T2   *+SP(REGS_B4+8),B4
+ ||    MVKL    .S2     sys_call_table,B1
+       LDW     .D2T1   *+SP(REGS_A6+8),A6
+ ||    MVKH    .S2     sys_call_table,B1
+       LDW     .D2T2   *+B1[B0],B0
+ ||    MVKL    .S2     ret_from_syscall_function,B3
+       LDW     .D2T2   *+SP(REGS_B6+8),B6
+ ||    MVKH    .S2     ret_from_syscall_function,B3
+       LDW     .D2T1   *+SP(REGS_A8+8),A8
+       LDW     .D2T2   *+SP(REGS_B8+8),B8
+       NOP
+       ; B0 = sys_call_table[__NR_*]
+       BNOP    .S2     B0,5                    ; branch to syscall handler
+ ||    LDW     .D2T1   *+SP(REGS_ORIG_A4+8),A4
+
+syscall_exit_work:
+       AND     .D1     _TIF_SYSCALL_TRACE,A2,A0
+ [!A0] BNOP    .S1     work_pending,5
+ [A0]  B       .S2     syscall_trace_exit
+       ADDKPC  .S2     resume_userspace,B3,1
+       MVC     .S2     CSR,B1
+       SET     .S2     B1,0,0,B1
+       MVC     .S2     B1,CSR          ; enable ints
+
+work_pending:
+       AND     .D1     _TIF_NEED_RESCHED,A2,A0
+ [!A0] BNOP    .S1     work_notifysig,5
+
+work_resched:
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     schedule,A1
+       MVKH    .S1     schedule,A1
+       B       .S2X    A1
+#else
+       B       .S2     schedule
+#endif
+       ADDKPC  .S2     work_rescheduled,B3,4
+work_rescheduled:
+       ;; make sure we don't miss an interrupt setting need_resched or
+       ;; sigpending between sampling and the rti
+       MASK_INT B2
+       GET_THREAD_INFO A12
+       LDW     .D1T1   *+A12(THREAD_INFO_FLAGS),A2
+       MVK     .S1     _TIF_WORK_MASK,A1
+       MVK     .S1     _TIF_NEED_RESCHED,A3
+       NOP     2
+       AND     .D1     A1,A2,A0
+ ||    AND     .S1     A3,A2,A1
+ [!A0] BNOP    .S1     restore_all,5
+ [A1]  BNOP    .S1     work_resched,5
+
+work_notifysig:
+       B       .S2     do_notify_resume
+       LDW     .D2T1   *+SP(REGS__END+8),A6 ; syscall flag
+       ADDKPC  .S2     resume_userspace,B3,1
+       ADD     .S1X    8,SP,A4         ; pt_regs pointer is first arg
+       MV      .D2X    A2,B4           ; thread_info flags is second arg
+
+       ;;
+       ;; On C64x+, the return way from exception and interrupt
+       ;; is a little bit different
+       ;;
+ENTRY(ret_from_exception)
+#ifdef CONFIG_PREEMPT
+       MASK_INT B2
+#endif
+
+ENTRY(ret_from_interrupt)
+       ;;
+       ;; Check if we are coming from user mode.
+       ;;
+       LDW     .D2T2   *+SP(REGS_TSR+8),B0
+       MVK     .S2     0x40,B1
+       NOP     3
+       AND     .D2     B0,B1,B0
+ [!B0] BNOP    .S2     resume_kernel,5
+
+resume_userspace:
+       ;; make sure we don't miss an interrupt setting need_resched or
+       ;; sigpending between sampling and the rti
+       MASK_INT B2
+       GET_THREAD_INFO A12
+       LDW     .D1T1   *+A12(THREAD_INFO_FLAGS),A2
+       MVK     .S1     _TIF_WORK_MASK,A1
+       MVK     .S1     _TIF_NEED_RESCHED,A3
+       NOP     2
+       AND     .D1     A1,A2,A0
+ [A0]  BNOP    .S1     work_pending,5
+       BNOP    .S1     restore_all,5
+
+       ;;
+       ;; System call handling
+       ;; B0 = syscall number (in sys_call_table)
+       ;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
+       ;; A4 is the return value register
+       ;;
+system_call_saved:
+       MVK     .L2     1,B2
+       STW     .D2T2   B2,*+SP(REGS__END+8)    ; set syscall flag
+       MVC     .S2     B2,ECR                  ; ack the software exception
+
+       UNMASK_INT B2                   ; re-enable global IT
+
+system_call_saved_noack:
+       ;; Check system call number
+       MVK     .S2     __NR_syscalls,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_ni_syscall,A0
+#endif
+       CMPLTU  .L2     B0,B1,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKH    .S1     sys_ni_syscall,A0
+#endif
+
+       ;; Check for ptrace
+       GET_THREAD_INFO A12
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B1] B       .S2X    A0
+#else
+ [!B1] B       .S2     sys_ni_syscall
+#endif
+ [!B1] ADDKPC  .S2     ret_from_syscall_function,B3,4
+
+       ;; Get syscall handler addr from sys_call_table
+       ;; call tracesys_on or call syscall handler
+       LDW     .D1T1   *+A12(THREAD_INFO_FLAGS),A2
+ ||    MVKL    .S2     sys_call_table,B1
+       MVKH    .S2     sys_call_table,B1
+       LDW     .D2T2   *+B1[B0],B0
+       NOP     2
+       ; A2 = thread_info flags
+       AND     .D1     _TIF_SYSCALL_TRACE,A2,A2
+ [A2]  BNOP    .S1     tracesys_on,5
+       ;; B0 = _sys_call_table[__NR_*]
+       B       .S2     B0
+       ADDKPC  .S2     ret_from_syscall_function,B3,4
+
+ret_from_syscall_function:
+       STW     .D2T1   A4,*+SP(REGS_A4+8)      ; save return value in A4
+                                               ; original A4 is in orig_A4
+syscall_exit:
+       ;; make sure we don't miss an interrupt setting need_resched or
+       ;; sigpending between sampling and the rti
+       MASK_INT B2
+       LDW     .D1T1   *+A12(THREAD_INFO_FLAGS),A2
+       MVK     .S1     _TIF_ALLWORK_MASK,A1
+       NOP     3
+       AND     .D1     A1,A2,A2 ; check for work to do
+ [A2]  BNOP    .S1     syscall_exit_work,5
+
+restore_all:
+       RESTORE_ALL NRP,NTSR
+
+       ;;
+       ;; After a fork we jump here directly from resume,
+       ;; so that A4 contains the previous task structure.
+       ;;
+ENTRY(ret_from_fork)
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     schedule_tail,A0
+       MVKH    .S1     schedule_tail,A0
+       B       .S2X    A0
+#else
+       B       .S2     schedule_tail
+#endif
+       ADDKPC  .S2     ret_from_fork_2,B3,4
+ret_from_fork_2:
+       ;; return 0 in A4 for child process
+       GET_THREAD_INFO A12
+       BNOP    .S2     syscall_exit,3
+       MVK     .L2     0,B0
+       STW     .D2T2   B0,*+SP(REGS_A4+8)
+ENDPROC(ret_from_fork)
+
+       ;;
+       ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
+       ;; int6 is used for syscalls (see _system_call entry)
+       ;;
+       .macro SAVE_ALL_INT
+       SAVE_ALL IRP,ITSR
+       .endm
+
+       .macro CALL_INT int
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     c6x_do_IRQ,A0
+       MVKH    .S1     c6x_do_IRQ,A0
+       BNOP    .S2X    A0,1
+       MVK     .S1     int,A4
+       ADDAW   .D2     SP,2,B4
+       MVKL    .S2     ret_from_interrupt,B3
+       MVKH    .S2     ret_from_interrupt,B3
+#else
+       CALLP   .S2     c6x_do_IRQ,B3
+ ||    MVK     .S1     int,A4
+ ||    ADDAW   .D2     SP,2,B4
+       B       .S1     ret_from_interrupt
+       NOP     5
+#endif
+       .endm
+
+ENTRY(_int4_handler)
+       SAVE_ALL_INT
+       CALL_INT 4
+ENDPROC(_int4_handler)
+
+ENTRY(_int5_handler)
+       SAVE_ALL_INT
+       CALL_INT 5
+ENDPROC(_int5_handler)
+
+ENTRY(_int6_handler)
+       SAVE_ALL_INT
+       CALL_INT 6
+ENDPROC(_int6_handler)
+
+ENTRY(_int7_handler)
+       SAVE_ALL_INT
+       CALL_INT 7
+ENDPROC(_int7_handler)
+
+ENTRY(_int8_handler)
+       SAVE_ALL_INT
+       CALL_INT 8
+ENDPROC(_int8_handler)
+
+ENTRY(_int9_handler)
+       SAVE_ALL_INT
+       CALL_INT 9
+ENDPROC(_int9_handler)
+
+ENTRY(_int10_handler)
+       SAVE_ALL_INT
+       CALL_INT 10
+ENDPROC(_int10_handler)
+
+ENTRY(_int11_handler)
+       SAVE_ALL_INT
+       CALL_INT 11
+ENDPROC(_int11_handler)
+
+ENTRY(_int12_handler)
+       SAVE_ALL_INT
+       CALL_INT 12
+ENDPROC(_int12_handler)
+
+ENTRY(_int13_handler)
+       SAVE_ALL_INT
+       CALL_INT 13
+ENDPROC(_int13_handler)
+
+ENTRY(_int14_handler)
+       SAVE_ALL_INT
+       CALL_INT 14
+ENDPROC(_int14_handler)
+
+ENTRY(_int15_handler)
+       SAVE_ALL_INT
+       CALL_INT 15
+ENDPROC(_int15_handler)
+
+       ;;
+       ;; Handler for uninitialized and spurious interrupts
+       ;;
+ENTRY(_bad_interrupt)
+       B       .S2     IRP
+       NOP     5
+ENDPROC(_bad_interrupt)
+
+       ;;
+       ;; Entry for NMI/exceptions/syscall
+       ;;
+ENTRY(_nmi_handler)
+       SAVE_ALL NRP,NTSR
+
+       MVC     .S2     EFR,B2
+       CMPEQ   .L2     1,B2,B2
+ ||    MVC     .S2     TSR,B1
+       CLR     .S2     B1,10,10,B1
+       MVC     .S2     B1,TSR
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B2] MVKL    .S1     process_exception,A0
+ [!B2] MVKH    .S1     process_exception,A0
+ [!B2] B       .S2X    A0
+#else
+ [!B2] B       .S2     process_exception
+#endif
+ [B2]  B       .S2     system_call_saved
+ [!B2] ADDAW   .D2     SP,2,B1
+ [!B2] MV      .D1X    B1,A4
+       ADDKPC  .S2     ret_from_trap,B3,2
+
+ret_from_trap:
+       MV      .D2X    A4,B0
+ [!B0] BNOP    .S2     ret_from_exception,5
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S2     system_call_saved_noack,B3
+       MVKH    .S2     system_call_saved_noack,B3
+#endif
+       LDW     .D2T2   *+SP(REGS_B0+8),B0
+       LDW     .D2T1   *+SP(REGS_A4+8),A4
+       LDW     .D2T2   *+SP(REGS_B4+8),B4
+       LDW     .D2T1   *+SP(REGS_A6+8),A6
+       LDW     .D2T2   *+SP(REGS_B6+8),B6
+       LDW     .D2T1   *+SP(REGS_A8+8),A8
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    B       .S2     B3
+#else
+ ||    B       .S2     system_call_saved_noack
+#endif
+       LDW     .D2T2   *+SP(REGS_B8+8),B8
+       NOP     4
+ENDPROC(_nmi_handler)
+
+       ;;
+       ;; Jump to schedule() then return to ret_from_isr
+       ;;
+#ifdef CONFIG_PREEMPT
+resume_kernel:
+       GET_THREAD_INFO A12
+       LDW     .D1T1   *+A12(THREAD_INFO_PREEMPT_COUNT),A1
+       NOP     4
+ [A1]  BNOP    .S2     restore_all,5
+
+preempt_schedule:
+       GET_THREAD_INFO A2
+       LDW     .D1T1   *+A2(THREAD_INFO_FLAGS),A1
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S2     preempt_schedule_irq,B0
+       MVKH    .S2     preempt_schedule_irq,B0
+       NOP     2
+#else
+       NOP     4
+#endif
+       AND     .D1     _TIF_NEED_RESCHED,A1,A1
+ [!A1] BNOP    .S2     restore_all,5
+#ifdef CONFIG_C6X_BIG_KERNEL
+       B       .S2     B0
+#else
+       B       .S2     preempt_schedule_irq
+#endif
+       ADDKPC  .S2     preempt_schedule,B3,4
+#endif /* CONFIG_PREEMPT */
+
+ENTRY(enable_exception)
+       DINT
+       MVC     .S2     TSR,B0
+       MVC     .S2     B3,NRP
+       MVK     .L2     0xc,B1
+       OR      .D2     B0,B1,B0
+       MVC     .S2     B0,TSR                  ;  Set GEE and XEN in TSR
+       B       .S2     NRP
+       NOP     5
+ENDPROC(enable_exception)
+
+ENTRY(sys_sigaltstack)
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     do_sigaltstack,A0       ; branch to do_sigaltstack
+       MVKH    .S1     do_sigaltstack,A0
+       B       .S2X    A0
+#else
+       B       .S2     do_sigaltstack
+#endif
+       LDW     .D2T1   *+SP(REGS_SP+8),A6
+       NOP     4
+ENDPROC(sys_sigaltstack)
+
+       ;; kernel_execve
+ENTRY(kernel_execve)
+       MVK     .S2     __NR_execve,B0
+       SWE
+       BNOP    .S2     B3,5
+ENDPROC(kernel_execve)
+
+       ;;
+       ;; Special system calls
+       ;; return address is in B3
+       ;;
+ENTRY(sys_clone)
+       ADD     .D1X    SP,8,A4
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_c6x_clone,A0
+       MVKH    .S1     sys_c6x_clone,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     sys_c6x_clone
+       NOP     5
+#endif
+ENDPROC(sys_clone)
+
+ENTRY(sys_rt_sigreturn)
+       ADD     .D1X    SP,8,A4
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     do_rt_sigreturn,A0
+       MVKH    .S1     do_rt_sigreturn,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     do_rt_sigreturn
+       NOP     5
+#endif
+ENDPROC(sys_rt_sigreturn)
+
+ENTRY(sys_execve)
+       ADDAW   .D2     SP,2,B6         ; put regs addr in 4th parameter
+                                       ; & adjust regs stack addr
+       LDW     .D2T2   *+SP(REGS_B4+8),B4
+
+       ;; c6x_execve(char *name, char **argv,
+       ;;            char **envp, struct pt_regs *regs)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_c6x_execve,A0
+       MVKH    .S1     sys_c6x_execve,A0
+       B       .S2X    A0
+#else
+ ||    B       .S2     sys_c6x_execve
+#endif
+       STW     .D2T2   B3,*SP--[2]
+       ADDKPC  .S2     ret_from_c6x_execve,B3,3
+
+ret_from_c6x_execve:
+       LDW     .D2T2   *++SP[2],B3
+       NOP     4
+       BNOP    .S2     B3,5
+ENDPROC(sys_execve)
+
+ENTRY(sys_pread_c6x)
+       MV      .D2X    A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_pread64,A0
+       MVKH    .S1     sys_pread64,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     sys_pread64
+       NOP     5
+#endif
+ENDPROC(sys_pread_c6x)
+
+ENTRY(sys_pwrite_c6x)
+       MV      .D2X    A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_pwrite64,A0
+       MVKH    .S1     sys_pwrite64,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     sys_pwrite64
+       NOP     5
+#endif
+ENDPROC(sys_pwrite_c6x)
+
+;; On Entry
+;;   A4 - path
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_truncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       MV      .S2     B4,B5
+       MV      .D2X    A6,B4
+#else
+       MV      .D2X    A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_truncate64,A0
+       MVKH    .S1     sys_truncate64,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     sys_truncate64
+       NOP     5
+#endif
+ENDPROC(sys_truncate64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_ftruncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       MV      .S2     B4,B5
+       MV      .D2X    A6,B4
+#else
+       MV      .D2X    A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||    MVKL    .S1     sys_ftruncate64,A0
+       MVKH    .S1     sys_ftruncate64,A0
+       BNOP    .S2X    A0,5
+#else
+ ||    B       .S2     sys_ftruncate64
+       NOP     5
+#endif
+ENDPROC(sys_ftruncate64_c6x)
+
+#ifdef __ARCH_WANT_SYSCALL_OFF_T
+;; On Entry
+;;   A4 - fd
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+;;   B6 - len
+;;   A8 - advice
+ENTRY(sys_fadvise64_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     sys_fadvise64,A0
+       MVKH    .S1     sys_fadvise64,A0
+       BNOP    .S2X    A0,2
+#else
+       B       .S2     sys_fadvise64
+       NOP     2
+#endif
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       MV      .L2     B4,B5
+ ||    MV      .D2X    A6,B4
+#else
+       MV      .D2X    A6,B5
+#endif
+       MV      .D1X    B6,A6
+       MV      .D2X    A8,B6
+#endif
+ENDPROC(sys_fadvise64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+;;   B6 - len_lo (LE), len_hi (BE)
+;;   A8 - len_lo (BE), len_hi (LE)
+;;   B8 - advice
+ENTRY(sys_fadvise64_64_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     sys_fadvise64_64,A0
+       MVKH    .S1     sys_fadvise64_64,A0
+       BNOP    .S2X    A0,2
+#else
+       B       .S2     sys_fadvise64_64
+       NOP     2
+#endif
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       MV      .L2     B4,B5
+ ||    MV      .D2X    A6,B4
+       MV      .L1     A8,A6
+ ||    MV      .D1X    B6,A7
+#else
+       MV      .D2X    A6,B5
+       MV      .L1     A8,A7
+ ||    MV      .D1X    B6,A6
+#endif
+       MV      .L2     B8,B6
+ENDPROC(sys_fadvise64_64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - mode
+;;   A6 - offset_hi
+;;   B6 - offset_lo
+;;   A8 - len_hi
+;;   B8 - len_lo
+ENTRY(sys_fallocate_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     sys_fallocate,A0
+       MVKH    .S1     sys_fallocate,A0
+       BNOP    .S2X    A0,1
+#else
+       B       .S2     sys_fallocate
+       NOP
+#endif
+       MV      .D1     A6,A7
+       MV      .D1X    B6,A6
+       MV      .D2X    A8,B7
+       MV      .D2     B8,B6
+ENDPROC(sys_fallocate_c6x)
+
+       ;; put this in .neardata for faster access when using DSBT mode
+       .section .neardata,"aw",@progbits
+       .global current_ksp
+       .hidden current_ksp
+current_ksp:
+       .word   init_thread_union + THREAD_START_SP
diff --git a/arch/c6x/kernel/head.S b/arch/c6x/kernel/head.S
new file mode 100644 (file)
index 0000000..133eab6
--- /dev/null
@@ -0,0 +1,84 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+#include <linux/linkage.h>
+#include <linux/of_fdt.h>
+#include <asm/asm-offsets.h>
+
+       __HEAD
+;; Kernel entry point.  On entry from the bootloader:
+;;   A4 - boot magic (OF_DT_HEADER when a flattened device tree is passed)
+;;   B4 - pointer to the device tree blob (only meaningful if A4 matches)
+ENTRY(_c_int00)
+       ;; Save magic and pointer
+       MV      .S1     A4,A10
+       MV      .S2     B4,B10
+       MVKL    .S2     __bss_start,B5
+       MVKH    .S2     __bss_start,B5
+       MVKL    .S2     __bss_stop,B6
+       MVKH    .S2     __bss_stop,B6
+       SUB     .L2     B6,B5,B6 ; bss size
+
+       ;; Set the stack pointer
+       MVKL    .S2     current_ksp,B0
+       MVKH    .S2     current_ksp,B0
+       LDW     .D2T2   *B0,B15
+
+       ;; clear bss, 8 bytes per iteration (assumes the linker script
+       ;; keeps the bss size a multiple of 8 -- TODO confirm)
+       SHR     .S2     B6,3,B0   ; number of dwords to clear
+       ZERO    .L2     B13
+       ZERO    .L2     B12
+bss_loop:
+       BDEC    .S2     bss_loop,B0
+       NOP     3
+       CMPLT   .L2     B0,0,B1
+ [!B1] STDW    .D2T2   B13:B12,*B5++[1]
+
+       NOP     4
+       ;; align the stack pointer down to a dword boundary
+       AND     .D2     ~7,B15,B15
+
+       ;; Clear GIE and PGIE
+       MVC     .S2     CSR,B2
+       CLR     .S2     B2,0,1,B2
+       MVC     .S2     B2,CSR
+       MVC     .S2     TSR,B2
+       CLR     .S2     B2,0,1,B2
+       MVC     .S2     B2,TSR
+       MVC     .S2     ITSR,B2
+       CLR     .S2     B2,0,1,B2
+       MVC     .S2     B2,ITSR
+       MVC     .S2     NTSR,B2
+       CLR     .S2     B2,0,1,B2
+       MVC     .S2     B2,NTSR
+
+       ;; pass DTB pointer to machine_init (or zero if none)
+       MVKL    .S1     OF_DT_HEADER,A0
+       MVKH    .S1     OF_DT_HEADER,A0
+       CMPEQ   .L1     A10,A0,A0
+  [A0] MV      .S1X    B10,A4
+  [!A0] MVK    .S1     0,A4
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+       ;; far call: branch via register, return address set with ADDKPC
+       MVKL    .S1     machine_init,A0
+       MVKH    .S1     machine_init,A0
+       B       .S2X    A0
+       ADDKPC  .S2     0f,B3,4
+0:
+#else
+       CALLP   .S2     machine_init,B3
+#endif
+
+       ;; Jump to Linux init
+#ifdef CONFIG_C6X_BIG_KERNEL
+       MVKL    .S1     start_kernel,A0
+       MVKH    .S1     start_kernel,A0
+       B       .S2X    A0
+#else
+       B       .S2     start_kernel
+#endif
+       NOP     5
+       ;; start_kernel never returns; spin here if it ever does
+L1:    BNOP    .S2     L1,5
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
new file mode 100644 (file)
index 0000000..0929e4b
--- /dev/null
@@ -0,0 +1,728 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ *  This borrows heavily from powerpc version, which is:
+ *
+ *  Derived from arch/i386/kernel/irq.c
+ *    Copyright (C) 1992 Linus Torvalds
+ *  Adapted from arch/i386 by Gary Thomas
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ *    Copyright (C) 1996-2001 Cort Dougan
+ *  Adapted for Power Macintosh by Paul Mackerras
+ *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/megamod-pic.h>
+
+/* count of spurious/unhandled interrupts, shown by arch_show_interrupts() */
+unsigned long irq_err_count;
+
+/* serializes read-modify-write of the IER control register */
+static DEFINE_RAW_SPINLOCK(core_irq_lock);
+
+/* Disable a core (priority) interrupt by clearing its bit in IER. */
+static void mask_core_irq(struct irq_data *data)
+{
+       unsigned int prio = data->irq;
+
+       BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
+
+       raw_spin_lock(&core_irq_lock);
+       and_creg(IER, ~(1 << prio));
+       raw_spin_unlock(&core_irq_lock);
+}
+
+/*
+ * Re-enable a core interrupt by setting its bit in IER.
+ * NOTE(review): unlike mask_core_irq() there is no BUG_ON() range check
+ * here -- confirm the asymmetry is intentional.
+ */
+static void unmask_core_irq(struct irq_data *data)
+{
+       unsigned int prio = data->irq;
+
+       raw_spin_lock(&core_irq_lock);
+       or_creg(IER, 1 << prio);
+       raw_spin_unlock(&core_irq_lock);
+}
+
+/* irq_chip for the C64x+ core priority interrupt controller */
+static struct irq_chip core_chip = {
+       .name           = "core",
+       .irq_mask       = mask_core_irq,
+       .irq_unmask     = unmask_core_irq,
+};
+
+/*
+ * Entry point from the low-level interrupt code.  @prio is the hardware
+ * priority; core interrupts are identity-mapped, so it is also the virq
+ * passed to generic_handle_irq().
+ */
+asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       irq_enter();
+
+       BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
+
+       generic_handle_irq(prio);
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
+/* irq_host for the core priority interrupt controller */
+static struct irq_host *core_host;
+
+/*
+ * Map callback for core interrupts.  Priorities 0-3 are rejected
+ * (presumably reserved exception/NMI priorities -- confirm against the
+ * C64x+ CPU reference); valid ones get the core chip as level IRQs.
+ */
+static int core_host_map(struct irq_host *h, unsigned int virq,
+                        irq_hw_number_t hw)
+{
+       if (hw < 4 || hw >= NR_PRIORITY_IRQS)
+               return -EINVAL;
+
+       irq_set_status_flags(virq, IRQ_LEVEL);
+       irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
+       return 0;
+}
+
+static struct irq_host_ops core_host_ops = {
+       .map = core_host_map,
+};
+
+/*
+ * Arch IRQ init: mask all priority interrupts, register the core
+ * controller (from the "ti,c64x+core-pic" DT node) as the default host,
+ * bring up the megamodule PIC, then clear any pending flags in ICR.
+ */
+void __init init_IRQ(void)
+{
+       struct device_node *np;
+
+       /* Mask all priority IRQs */
+       and_creg(IER, ~0xfff0);
+
+       np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
+       if (np != NULL) {
+               /* create the core host */
+               core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
+                                          &core_host_ops, 0);
+               if (core_host)
+                       irq_set_default_host(core_host);
+               of_node_put(np);
+       }
+
+       printk(KERN_INFO "Core interrupt controller initialized\n");
+
+       /* now we're ready for other SoC controllers */
+       megamod_pic_init();
+
+       /* Clear all general IRQ flags */
+       set_creg(ICR, 0xfff0);
+}
+
+/* Called for interrupts with no handler: count them as errors. */
+void ack_bad_irq(int irq)
+{
+       printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+       irq_err_count++;
+}
+
+/* /proc/interrupts footer: report the spurious-interrupt count. */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+       seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+       return 0;
+}
+
+/*
+ * IRQ controller and virtual interrupts
+ */
+
+/* The main irq map itself is an array of NR_IRQS entries containing the
+ * associated host and irq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free, the allocator always then sets
+ * hwirq first to the host's invalid irq number and then fills ops.
+ */
+struct irq_map_entry {
+       irq_hw_number_t hwirq;
+       struct irq_host *host;
+};
+
+/* all registered irq_hosts, protected by irq_big_lock */
+static LIST_HEAD(irq_hosts);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
+/* protects insert/delete on the radix-tree reverse maps */
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_map_entry irq_map[NR_IRQS];
+/* usable size of the virq space; may be lowered by irq_set_virq_count() */
+static unsigned int irq_virq_count = NR_IRQS;
+static struct irq_host *irq_default_host;
+
+/* virq -> hwirq, taking the virq from irq_data */
+irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+       return irq_map[d->irq].hwirq;
+}
+EXPORT_SYMBOL_GPL(irqd_to_hwirq);
+
+/* virq -> hwirq (no bounds check; caller must pass a valid virq) */
+irq_hw_number_t virq_to_hw(unsigned int virq)
+{
+       return irq_map[virq].hwirq;
+}
+EXPORT_SYMBOL_GPL(virq_to_hw);
+
+/* true if @virq is currently owned by @host */
+bool virq_is_host(unsigned int virq, struct irq_host *host)
+{
+       return irq_map[virq].host == host;
+}
+EXPORT_SYMBOL_GPL(virq_is_host);
+
+/* default ops->match: compare the host's device node to @np */
+static int default_irq_host_match(struct irq_host *h, struct device_node *np)
+{
+       return h->of_node != NULL && h->of_node == np;
+}
+
+/*
+ * irq_alloc_host() - allocate and register an interrupt host
+ * @of_node:     controller device node (a reference is taken)
+ * @revmap_type: IRQ_HOST_MAP_{PRIORITY,LINEAR,TREE,NOMAP}
+ * @revmap_arg:  size of the linear revmap (IRQ_HOST_MAP_LINEAR only)
+ * @ops:         controller callbacks; ops->match defaults to a
+ *               device-node comparison if NULL
+ * @inval_irq:   hwirq value meaning "no irq" (forced to 0 for PRIORITY)
+ *
+ * Returns the new host, or NULL on allocation failure or when a
+ * priority host is already registered (only one is allowed).
+ */
+struct irq_host *irq_alloc_host(struct device_node *of_node,
+                               unsigned int revmap_type,
+                               unsigned int revmap_arg,
+                               struct irq_host_ops *ops,
+                               irq_hw_number_t inval_irq)
+{
+       struct irq_host *host;
+       unsigned int size = sizeof(struct irq_host);
+       unsigned int i;
+       unsigned int *rmap;
+       unsigned long flags;
+
+       /* Allocate structure and revmap table if using linear mapping */
+       if (revmap_type == IRQ_HOST_MAP_LINEAR)
+               size += revmap_arg * sizeof(unsigned int);
+       host = kzalloc(size, GFP_KERNEL);
+       if (host == NULL)
+               return NULL;
+
+       /* Fill structure */
+       host->revmap_type = revmap_type;
+       host->inval_irq = inval_irq;
+       host->ops = ops;
+       host->of_node = of_node_get(of_node);
+
+       if (host->ops->match == NULL)
+               host->ops->match = default_irq_host_match;
+
+       raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+       /* Check for the priority controller. */
+       if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
+               if (irq_map[0].host != NULL) {
+                       raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+                       of_node_put(host->of_node);
+                       kfree(host);
+                       return NULL;
+               }
+               /* slot 0 marks the priority host as taken */
+               irq_map[0].host = host;
+       }
+
+       list_add(&host->link, &irq_hosts);
+       raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+
+       /* Additional setups per revmap type */
+       switch (revmap_type) {
+       case IRQ_HOST_MAP_PRIORITY:
+               /* 0 is always the invalid number for priority */
+               host->inval_irq = 0;
+               /* setup us as the host for all priority interrupts.
+                * NOTE(review): ops->map() return value is ignored here;
+                * core_host_map() rejects hwirqs < 4, so entries 1-3 stay
+                * unconfigured -- confirm this is intentional.
+                */
+               for (i = 1; i < NR_PRIORITY_IRQS; i++) {
+                       irq_map[i].hwirq = i;
+                       smp_wmb();
+                       irq_map[i].host = host;
+                       smp_wmb();
+
+                       ops->map(host, i, i);
+               }
+               break;
+       case IRQ_HOST_MAP_LINEAR:
+               /* revmap table was allocated right behind the host struct */
+               rmap = (unsigned int *)(host + 1);
+               for (i = 0; i < revmap_arg; i++)
+                       rmap[i] = NO_IRQ;
+               host->revmap_data.linear.size = revmap_arg;
+               smp_wmb();
+               host->revmap_data.linear.revmap = rmap;
+               break;
+       case IRQ_HOST_MAP_TREE:
+               INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+               break;
+       default:
+               break;
+       }
+
+       pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
+
+       return host;
+}
+
+/* Find the registered irq_host matching @node via ops->match(). */
+struct irq_host *irq_find_host(struct device_node *node)
+{
+       struct irq_host *h, *found = NULL;
+       unsigned long flags;
+
+       /* We might want to match the legacy controller last since
+        * it might potentially be set to match all interrupts in
+        * the absence of a device node. This isn't a problem so far
+        * yet though...
+        */
+       raw_spin_lock_irqsave(&irq_big_lock, flags);
+       list_for_each_entry(h, &irq_hosts, link)
+               if (h->ops->match(h, node)) {
+                       found = h;
+                       break;
+               }
+       raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+       return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/* Set the fallback host used when lookups are passed a NULL host/node. */
+void irq_set_default_host(struct irq_host *host)
+{
+       pr_debug("irq: Default host set to @0x%p\n", host);
+
+       irq_default_host = host;
+}
+
+/* Limit the usable virq space.  Values below NR_PRIORITY_IRQS are a
+ * BUG; values >= NR_IRQS are silently ignored. */
+void irq_set_virq_count(unsigned int count)
+{
+       pr_debug("irq: Trying to set virq count to %d\n", count);
+
+       BUG_ON(count < NR_PRIORITY_IRQS);
+       if (count < NR_IRQS)
+               irq_virq_count = count;
+}
+
+/*
+ * Second stage of mapping creation: allocate the irq_desc for @virq,
+ * record @hwirq in the map and call the host's map() hook.  On failure
+ * the virq (already reserved by the caller via irq_alloc_virt()) is
+ * released.  Returns 0 on success, -1 on failure.
+ */
+static int irq_setup_virq(struct irq_host *host, unsigned int virq,
+                           irq_hw_number_t hwirq)
+{
+       int res;
+
+       res = irq_alloc_desc_at(virq, 0);
+       if (res != virq) {
+               pr_debug("irq: -> allocating desc failed\n");
+               goto error;
+       }
+
+       /* map it */
+       smp_wmb();
+       irq_map[virq].hwirq = hwirq;
+       smp_mb();
+
+       if (host->ops->map(host, virq, hwirq)) {
+               pr_debug("irq: -> mapping failed, freeing\n");
+               goto errdesc;
+       }
+
+       irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
+       return 0;
+
+errdesc:
+       irq_free_descs(virq, 1);
+error:
+       irq_free_virt(virq, 1);
+       return -1;
+}
+
+/*
+ * Allocate a virq with no reverse mapping; the virq itself serves as
+ * the hwirq.  Only meaningful for IRQ_HOST_MAP_NOMAP hosts.  Returns
+ * NO_IRQ on failure.
+ */
+unsigned int irq_create_direct_mapping(struct irq_host *host)
+{
+       unsigned int virq;
+
+       if (host == NULL)
+               host = irq_default_host;
+
+       BUG_ON(host == NULL);
+       WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
+
+       virq = irq_alloc_virt(host, 1, 0);
+       if (virq == NO_IRQ) {
+               pr_debug("irq: create_direct virq allocation failed\n");
+               return NO_IRQ;
+       }
+
+       pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+       if (irq_setup_virq(host, virq, virq))
+               return NO_IRQ;
+
+       return virq;
+}
+
+/*
+ * Map @hwirq on @host (or the default host) to a virq, creating the
+ * mapping if it does not already exist.  Returns the virq, or NO_IRQ
+ * on failure.
+ */
+unsigned int irq_create_mapping(struct irq_host *host,
+                               irq_hw_number_t hwirq)
+{
+       unsigned int virq, hint;
+
+       pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
+
+       /* Look for default host if necessary */
+       if (host == NULL)
+               host = irq_default_host;
+       if (host == NULL) {
+               printk(KERN_WARNING "irq_create_mapping called for"
+                      " NULL host, hwirq=%lx\n", hwirq);
+               WARN_ON(1);
+               return NO_IRQ;
+       }
+       pr_debug("irq: -> using host @%p\n", host);
+
+       /* Check if mapping already exists */
+       virq = irq_find_mapping(host, hwirq);
+       if (virq != NO_IRQ) {
+               pr_debug("irq: -> existing mapping on virq %d\n", virq);
+               return virq;
+       }
+
+       /* Allocate a virtual interrupt number; prefer virq == hwirq
+        * (mod the virq space) so the map stays mostly identity. */
+       hint = hwirq % irq_virq_count;
+       virq = irq_alloc_virt(host, 1, hint);
+       if (virq == NO_IRQ) {
+               pr_debug("irq: -> virq allocation failed\n");
+               return NO_IRQ;
+       }
+
+       if (irq_setup_virq(host, virq, hwirq))
+               return NO_IRQ;
+
+       pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
+               hwirq, host->of_node ? host->of_node->full_name : "null", virq);
+
+       return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+/*
+ * Create a mapping from a device-tree interrupt specifier: find the
+ * host for @controller (or the default host if NULL), translate the
+ * specifier via ops->xlate() when available, map it, and apply the
+ * trigger type.  Returns the virq or NO_IRQ.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+                                  const u32 *intspec, unsigned int intsize)
+{
+       struct irq_host *host;
+       irq_hw_number_t hwirq;
+       unsigned int type = IRQ_TYPE_NONE;
+       unsigned int virq;
+
+       if (controller == NULL)
+               host = irq_default_host;
+       else
+               host = irq_find_host(controller);
+       if (host == NULL) {
+               /* NOTE(review): if @controller is NULL (no default host
+                * registered) this printk dereferences a NULL pointer via
+                * controller->full_name -- needs a guard. */
+               printk(KERN_WARNING "irq: no irq host found for %s !\n",
+                      controller->full_name);
+               return NO_IRQ;
+       }
+
+       /* If host has no translation, then we assume interrupt line */
+       if (host->ops->xlate == NULL)
+               hwirq = intspec[0];
+       else {
+               if (host->ops->xlate(host, controller, intspec, intsize,
+                                    &hwirq, &type))
+                       return NO_IRQ;
+       }
+
+       /* Create mapping */
+       virq = irq_create_mapping(host, hwirq);
+       if (virq == NO_IRQ)
+               return virq;
+
+       /* Set type if specified and different than the current one */
+       if (type != IRQ_TYPE_NONE &&
+           type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+               irq_set_irq_type(virq, type);
+       return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+/*
+ * Tear down the mapping for @virq: detach chip/handler, let the host
+ * unmap it, clear the reverse map and release the virq.  Core priority
+ * interrupts (virq < NR_PRIORITY_IRQS) are permanent and never unmapped.
+ */
+void irq_dispose_mapping(unsigned int virq)
+{
+       struct irq_host *host;
+       irq_hw_number_t hwirq;
+
+       if (virq == NO_IRQ)
+               return;
+
+       /* Never unmap priority interrupts */
+       if (virq < NR_PRIORITY_IRQS)
+               return;
+
+       host = irq_map[virq].host;
+       if (WARN_ON(host == NULL))
+               return;
+
+       /* block further request_irq() while we dismantle it */
+       irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+       /* remove chip and handler */
+       irq_set_chip_and_handler(virq, NULL, NULL);
+
+       /* Make sure it's completed */
+       synchronize_irq(virq);
+
+       /* Tell the PIC about it */
+       if (host->ops->unmap)
+               host->ops->unmap(host, virq);
+       smp_mb();
+
+       /* Clear reverse map */
+       hwirq = irq_map[virq].hwirq;
+       switch (host->revmap_type) {
+       case IRQ_HOST_MAP_LINEAR:
+               if (hwirq < host->revmap_data.linear.size)
+                       host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
+               break;
+       case IRQ_HOST_MAP_TREE:
+               mutex_lock(&revmap_trees_mutex);
+               radix_tree_delete(&host->revmap_data.tree, hwirq);
+               mutex_unlock(&revmap_trees_mutex);
+               break;
+       }
+
+       /* Destroy map */
+       smp_mb();
+       irq_map[virq].hwirq = host->inval_irq;
+
+       irq_free_descs(virq, 1);
+       /* Free it */
+       irq_free_virt(virq, 1);
+}
+EXPORT_SYMBOL_GPL(irq_dispose_mapping);
+
+/*
+ * Linear search of irq_map for (@host, @hwirq), starting at the
+ * identity hint and wrapping around.  Returns the virq or NO_IRQ.
+ */
+unsigned int irq_find_mapping(struct irq_host *host,
+                             irq_hw_number_t hwirq)
+{
+       unsigned int i;
+       unsigned int hint = hwirq % irq_virq_count;
+
+       /* Look for default host if necessary */
+       if (host == NULL)
+               host = irq_default_host;
+       if (host == NULL)
+               return NO_IRQ;
+
+       /* Slow path does a linear search of the map */
+       i = hint;
+       do  {
+               if (irq_map[i].host == host &&
+                   irq_map[i].hwirq == hwirq)
+                       return i;
+               i++;
+               /* wrap to 4, the first mappable priority irq
+                * (NOTE(review): hard-coded; presumably should track the
+                * same constant as core_host_map() -- confirm) */
+               if (i >= irq_virq_count)
+                       i = 4;
+       } while (i != hint);
+       return NO_IRQ;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
+/*
+ * Fast hwirq -> virq lookup for IRQ_HOST_MAP_TREE hosts via the radix
+ * tree; falls back to the linear search if the entry is missing or the
+ * host type is wrong.
+ */
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+                                    irq_hw_number_t hwirq)
+{
+       struct irq_map_entry *ptr;
+       unsigned int virq;
+
+       if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
+               return irq_find_mapping(host, hwirq);
+
+       /*
+        * The ptr returned references the static global irq_map.
+        * but freeing an irq can delete nodes along the path to
+        * do the lookup via call_rcu.
+        */
+       rcu_read_lock();
+       ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
+       rcu_read_unlock();
+
+       /*
+        * If found in radix tree, then fine.
+        * Else fallback to linear lookup - this should not happen in practice
+        * as it means that we failed to insert the node in the radix tree.
+        */
+       if (ptr)
+               virq = ptr - irq_map; /* entry index == virq */
+       else
+               virq = irq_find_mapping(host, hwirq);
+
+       return virq;
+}
+
+/* Record hwirq -> virq in the host's radix-tree reverse map. */
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+                            irq_hw_number_t hwirq)
+{
+       if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
+               return;
+
+       if (virq != NO_IRQ) {
+               mutex_lock(&revmap_trees_mutex);
+               radix_tree_insert(&host->revmap_data.tree, hwirq,
+                                 &irq_map[virq]);
+               mutex_unlock(&revmap_trees_mutex);
+       }
+}
+
+/*
+ * Fast hwirq -> virq lookup for IRQ_HOST_MAP_LINEAR hosts.  Falls back
+ * to (and lazily caches the result of) the linear search on a miss.
+ */
+unsigned int irq_linear_revmap(struct irq_host *host,
+                              irq_hw_number_t hwirq)
+{
+       unsigned int *revmap;
+
+       if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+               return irq_find_mapping(host, hwirq);
+
+       /* Check revmap bounds */
+       if (unlikely(hwirq >= host->revmap_data.linear.size))
+               return irq_find_mapping(host, hwirq);
+
+       /* Check if revmap was allocated */
+       revmap = host->revmap_data.linear.revmap;
+       if (unlikely(revmap == NULL))
+               return irq_find_mapping(host, hwirq);
+
+       /* Fill up revmap with slow path if no mapping found */
+       if (unlikely(revmap[hwirq] == NO_IRQ))
+               revmap[hwirq] = irq_find_mapping(host, hwirq);
+
+       return revmap[hwirq];
+}
+
+/*
+ * Reserve @count consecutive free virqs for @host, above the priority
+ * range.  @hint is honoured only for single-irq allocations.  Each
+ * reserved entry gets hwirq = host->inval_irq until mapped.  Returns
+ * the first virq or NO_IRQ.
+ */
+unsigned int irq_alloc_virt(struct irq_host *host,
+                           unsigned int count,
+                           unsigned int hint)
+{
+       unsigned long flags;
+       unsigned int i, j, found = NO_IRQ;
+
+       if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
+               return NO_IRQ;
+
+       raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+       /* Use hint for 1 interrupt if any */
+       if (count == 1 && hint >= NR_PRIORITY_IRQS &&
+           hint < irq_virq_count && irq_map[hint].host == NULL) {
+               found = hint;
+               goto hint_found;
+       }
+
+       /* Look for count consecutive numbers in the allocatable
+        * (non-legacy) space
+        */
+       for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
+               if (irq_map[i].host != NULL)
+                       j = 0;
+               else
+                       j++;
+
+               if (j == count) {
+                       found = i - count + 1;
+                       break;
+               }
+       }
+       if (found == NO_IRQ) {
+               raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+               return NO_IRQ;
+       }
+ hint_found:
+       for (i = found; i < (found + count); i++) {
+               irq_map[i].hwirq = host->inval_irq;
+               smp_wmb();
+               irq_map[i].host = host;
+       }
+       raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+       return found;
+}
+
+/*
+ * Release @count virqs starting at @virq.  Requests overlapping the
+ * priority range or running past irq_virq_count are clamped (with a
+ * WARN_ON) rather than rejected outright.
+ */
+void irq_free_virt(unsigned int virq, unsigned int count)
+{
+       unsigned long flags;
+       unsigned int i;
+
+       WARN_ON(virq < NR_PRIORITY_IRQS);
+       WARN_ON(count == 0 || (virq + count) > irq_virq_count);
+
+       /* clamp away any priority-range overlap */
+       if (virq < NR_PRIORITY_IRQS) {
+               if (virq + count < NR_PRIORITY_IRQS)
+                       return;
+               count  -= NR_PRIORITY_IRQS - virq;
+               virq = NR_PRIORITY_IRQS;
+       }
+
+       /* clamp away anything past the end of the virq space */
+       if (count > irq_virq_count || virq > irq_virq_count - count) {
+               if (virq > irq_virq_count)
+                       return;
+               count = irq_virq_count - virq;
+       }
+
+       raw_spin_lock_irqsave(&irq_big_lock, flags);
+       for (i = virq; i < (virq + count); i++) {
+               struct irq_host *host;
+
+               host = irq_map[i].host;
+               irq_map[i].hwirq = host->inval_irq;
+               smp_wmb();
+               irq_map[i].host = NULL;
+       }
+       raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+#ifdef CONFIG_VIRQ_DEBUG
+/* seq_file body for the "virq_mapping" debugfs file: one row per
+ * active virq showing hwirq, chip name, chip data and host node. */
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+       unsigned long flags;
+       struct irq_desc *desc;
+       const char *p;
+       static const char none[] = "none";
+       void *data;
+       int i;
+
+       seq_printf(m, "%-5s  %-7s  %-15s  %-18s  %s\n", "virq", "hwirq",
+                     "chip name", "chip data", "host name");
+
+       for (i = 1; i < nr_irqs; i++) {
+               desc = irq_to_desc(i);
+               if (!desc)
+                       continue;
+
+               raw_spin_lock_irqsave(&desc->lock, flags);
+
+               /* only show virqs with a handler installed */
+               if (desc->action && desc->action->handler) {
+                       struct irq_chip *chip;
+
+                       seq_printf(m, "%5d  ", i);
+                       seq_printf(m, "0x%05lx  ", irq_map[i].hwirq);
+
+                       chip = irq_desc_get_chip(desc);
+                       if (chip && chip->name)
+                               p = chip->name;
+                       else
+                               p = none;
+                       seq_printf(m, "%-15s  ", p);
+
+                       data = irq_desc_get_chip_data(desc);
+                       seq_printf(m, "0x%16p  ", data);
+
+                       if (irq_map[i].host && irq_map[i].host->of_node)
+                               p = irq_map[i].host->of_node->full_name;
+                       else
+                               p = none;
+                       seq_printf(m, "%s\n", p);
+               }
+
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
+       }
+
+       return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+       .open = virq_debug_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+/* NOTE(review): powerpc_debugfs_root looks like a leftover from the
+ * powerpc code this was derived from -- confirm it resolves on c6x
+ * (and that debugfs.h is included) when CONFIG_VIRQ_DEBUG is set. */
+static int __init irq_debugfs_init(void)
+{
+       if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
+                                NULL, &virq_debug_fops) == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+device_initcall(irq_debugfs_init);
+#endif /* CONFIG_VIRQ_DEBUG */
diff --git a/arch/c6x/kernel/module.c b/arch/c6x/kernel/module.c
new file mode 100644 (file)
index 0000000..5fc03f1
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2005, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Thomas Charleux (thomas.charleux@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+
+/*
+ * Patch a PC-relative branch displacement field in the opcode at @ip.
+ * @dest is the target address, @maskbits the signed width of the field
+ * and @shift its bit position.  The displacement is measured in 32-bit
+ * words relative to the fetch-packet address (ip rounded down to 32
+ * bytes).  Returns 0 on success, -1 if @dest is out of range.
+ */
+static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift)
+{
+       u32 opcode;
+       long ep = (long)ip & ~31;            /* fetch-packet base */
+       long delta = ((long)dest - ep) >> 2; /* displacement in words */
+       long mask = (1 << maskbits) - 1;
+
+       /* range check: delta must fit in a maskbits-wide signed field */
+       if ((delta >> (maskbits - 1)) == 0 ||
+           (delta >> (maskbits - 1)) == -1) {
+               opcode = *ip;
+               opcode &= ~(mask << shift);
+               opcode |= ((delta & mask) << shift);
+               *ip = opcode;
+
+               pr_debug("REL PCR_S%d[%p] dest[%p] opcode[%08x]\n",
+                        maskbits, ip, (void *)dest, opcode);
+
+               return 0;
+       }
+       pr_err("PCR_S%d reloc %p -> %p out of range!\n",
+              maskbits, ip, (void *)dest);
+
+       return -1;
+}
+
+/*
+ * apply a RELA relocation
+ *
+ * Walk the RELA entries of section @relsec and patch each referenced
+ * location: absolute 32/16/8-bit stores, the MVK low/high 16-bit
+ * immediate fields (ABS_L16/ABS_H16), and PC-relative branch fields
+ * (PCR_S21/S12/S10 via fixup_pcr()).  Returns 0 or -ENOEXEC.
+ */
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+                      const char *strtab,
+                      unsigned int symindex,
+                      unsigned int relsec,
+                      struct module *me)
+{
+       Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+       Elf_Sym *sym;
+       u32 *location, opcode;
+       unsigned int i;
+       Elf32_Addr v;
+       Elf_Addr offset = 0; /* always 0 here; kept for the debug trace */
+
+       pr_debug("Applying relocate section %u to %u with offset 0x%x\n",
+                relsec, sechdrs[relsec].sh_info, offset);
+
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+               /* This is where to make the change */
+               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                       + rel[i].r_offset - offset;
+
+               /* This is the symbol it is referring to.  Note that all
+                  undefined symbols have been resolved.  */
+               sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+                       + ELF32_R_SYM(rel[i].r_info);
+
+               /* this is the adjustment to be made */
+               v = sym->st_value + rel[i].r_addend;
+
+               switch (ELF32_R_TYPE(rel[i].r_info)) {
+               case R_C6000_ABS32:
+                       pr_debug("RELA ABS32: [%p] = 0x%x\n", location, v);
+                       *location = v;
+                       break;
+               case R_C6000_ABS16:
+                       pr_debug("RELA ABS16: [%p] = 0x%x\n", location, v);
+                       *(u16 *)location = v;
+                       break;
+               case R_C6000_ABS8:
+                       pr_debug("RELA ABS8: [%p] = 0x%x\n", location, v);
+                       *(u8 *)location = v;
+                       break;
+               case R_C6000_ABS_L16:
+                       /* low 16 bits of v into the MVK immediate field */
+                       opcode = *location;
+                       opcode &= ~0x7fff80;
+                       opcode |= ((v & 0xffff) << 7);
+                       pr_debug("RELA ABS_L16[%p] v[0x%x] opcode[0x%x]\n",
+                                location, v, opcode);
+                       *location = opcode;
+                       break;
+               case R_C6000_ABS_H16:
+                       /* high 16 bits of v into the MVKH immediate field */
+                       opcode = *location;
+                       opcode &= ~0x7fff80;
+                       opcode |= ((v >> 9) & 0x7fff80);
+                       pr_debug("RELA ABS_H16[%p] v[0x%x] opcode[0x%x]\n",
+                                location, v, opcode);
+                       *location = opcode;
+                       break;
+               case R_C6000_PCR_S21:
+                       if (fixup_pcr(location, v, 21, 7))
+                               return -ENOEXEC;
+                       break;
+               case R_C6000_PCR_S12:
+                       if (fixup_pcr(location, v, 12, 16))
+                               return -ENOEXEC;
+                       break;
+               case R_C6000_PCR_S10:
+                       if (fixup_pcr(location, v, 10, 13))
+                               return -ENOEXEC;
+                       break;
+               default:
+                       pr_err("module %s: Unknown RELA relocation: %u\n",
+                              me->name, ELF32_R_TYPE(rel[i].r_info));
+                       return -ENOEXEC;
+               }
+       }
+
+       return 0;
+}
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
new file mode 100644 (file)
index 0000000..7ca8c41
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/init_task.h>
+#include <linux/tick.h>
+#include <linux/mqueue.h>
+#include <linux/syscalls.h>
+#include <linux/reboot.h>
+
+#include <asm/syscalls.h>
+
+/* hooks for board specific support */
+void   (*c6x_restart)(void);
+void   (*c6x_halt)(void);
+
+/* fork return path, provided by entry assembly; used as the child's pc */
+extern asmlinkage void ret_from_fork(void);
+
+/* signal state for the initial task; presumably referenced by INIT_TASK()
+ * below — the macro body is not visible here, confirm against init_task.h */
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+
+/*
+ * Initial thread structure.
+ */
+union thread_union init_thread_union __init_task_data =        {
+       INIT_THREAD_INFO(init_task)
+};
+
+/*
+ * Initial task structure.
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/*
+ * power off function, if any (set by board/platform code, consumed by
+ * machine_power_off() below)
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * Idle the core.  Sets bit 0 (GIE) in CSR and issues 'idle' in the same
+ * execute packet ('||' is the C6x parallel-execution bar), so interrupt
+ * enable and idle take effect together.
+ */
+static void c6x_idle(void)
+{
+       unsigned long tmp;
+
+       /*
+        * Put local_irq_enable and idle in same execute packet
+        * to make them atomic and avoid race to idle with
+        * interrupts enabled.
+        */
+       asm volatile ("   mvc .s2 CSR,%0\n"
+                     "   or  .d2 1,%0,%0\n"
+                     "   mvc .s2 %0,CSR\n"
+                     "|| idle\n"
+                     : "=b"(tmp));
+}
+
+/*
+ * The idle loop for C64x
+ *
+ * Interrupts are disabled before the need_resched() check so a wakeup
+ * cannot slip in between the check and the idle instruction; c6x_idle()
+ * re-enables them atomically with entering idle.
+ */
+void cpu_idle(void)
+{
+       /* endless idle loop with no priority at all */
+       while (1) {
+               tick_nohz_idle_enter();
+               rcu_idle_enter();
+               while (1) {
+                       local_irq_disable();
+                       if (need_resched()) {
+                               local_irq_enable();
+                               break;
+                       }
+                       c6x_idle(); /* enables local irqs */
+               }
+               rcu_idle_exit();
+               tick_nohz_idle_exit();
+
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
+       }
+}
+
+/*
+ * Final resting place: announce the halt, mask interrupts and spin on
+ * the 'idle' instruction forever.  Never returns.
+ */
+static void halt_loop(void)
+{
+       printk(KERN_EMERG "System Halted, OK to turn off power\n");
+       local_irq_disable();
+       while (1)
+               asm volatile("idle\n");
+}
+
+/* Restart via the board-specific hook if one is registered; if the hook
+ * is absent or returns, fall through to a permanent halt. */
+void machine_restart(char *__unused)
+{
+       if (c6x_restart)
+               c6x_restart();
+       halt_loop();
+}
+
+/* Halt via the board-specific hook if one is registered, then spin. */
+void machine_halt(void)
+{
+       if (c6x_halt)
+               c6x_halt();
+       halt_loop();
+}
+
+/* Power off via pm_power_off if the platform set it, then spin. */
+void machine_power_off(void)
+{
+       if (pm_power_off)
+               pm_power_off();
+       halt_loop();
+}
+
+/*
+ * First C code run by a new kernel thread: call fn(arg) and exit with
+ * its return value.  'dummy' absorbs the first argument register (A4),
+ * which copy_thread() zeroes for the child — see kernel_thread() below.
+ */
+static void kernel_thread_helper(int dummy, void *arg, int (*fn)(void *))
+{
+       do_exit(fn(arg));
+}
+
+/*
+ * Create a kernel thread
+ *
+ * The child starts in kernel_thread_helper() with 'arg' in B4 and 'fn'
+ * in A6; A4 (the first argument slot) is zeroed by copy_thread(), hence
+ * the helper's dummy first parameter.
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+       struct pt_regs regs;
+
+       /*
+        * copy_thread sets a4 to zero (child return from fork)
+        * so we can't just set things up to directly return to
+        * fn.
+        */
+       memset(&regs, 0, sizeof(regs));
+       regs.b4 = (unsigned long) arg;
+       regs.a6 = (unsigned long) fn;
+       regs.pc = (unsigned long) kernel_thread_helper;
+       local_save_flags(regs.csr);
+       regs.csr |= 1; /* GIE: child runs with interrupts enabled */
+       regs.tsr = 5; /* Set GEE and GIE in TSR */
+
+       /* Ok, create the new process.. usp of -1 selects the
+        * kernel-thread stack setup in copy_thread() */
+       return do_fork(flags | CLONE_VM | CLONE_UNTRACED, -1, &regs,
+                      0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/* Nothing to flush on exec for this architecture. */
+void flush_thread(void)
+{
+}
+
+/* No per-thread state to release on exit. */
+void exit_thread(void)
+{
+}
+
+/*
+ * clone() syscall entry.  The flags arrive in A4 (preserved by the
+ * entry path as orig_a4) and the new user stack pointer in B4; a zero
+ * B4 means "share the caller's current sp".  A6/B6 are passed through
+ * to do_fork() as the parent/child TID pointers.
+ */
+SYSCALL_DEFINE1(c6x_clone, struct pt_regs *, regs)
+{
+       unsigned long clone_flags;
+       unsigned long newsp;
+
+       /* syscall puts clone_flags in A4 and usp in B4 */
+       clone_flags = regs->orig_a4;
+       if (regs->b4)
+               newsp = regs->b4;
+       else
+               newsp = regs->sp;
+
+       return do_fork(clone_flags, newsp, regs, 0, (int __user *)regs->a6,
+                      (int __user *)regs->b6);
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * @regs: register frame to seed for the new user context
+ * @pc:   user-space entry point
+ * @usp:  initial user stack pointer (adjusted below)
+ */
+void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
+{
+       /*
+        * The binfmt loader will setup a "full" stack, but the C6X
+        * operates an "empty" stack. So we adjust the usp so that
+        * argc doesn't get destroyed if an interrupt is taken before
+        * it is read from the stack.
+        *
+        * NB: Library startup code needs to match this.
+        */
+       usp -= 8;
+
+       set_fs(USER_DS);
+       regs->pc  = pc;
+       regs->sp  = usp;
+       regs->tsr |= 0x40; /* set user mode */
+       current->thread.usp = usp;
+}
+
+/*
+ * Copy a new thread context in its stack.
+ *
+ * Clones the parent's register frame into the child's kernel stack,
+ * zeroing A4 so the child observes a 0 return from fork.  A usp of -1
+ * marks a kernel thread (see kernel_thread()), which keeps running on
+ * its kernel stack.  Returns 0 on success.
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+               unsigned long ustk_size,
+               struct task_struct *p, struct pt_regs *regs)
+{
+       struct pt_regs *childregs;
+
+       childregs = task_pt_regs(p);
+
+       *childregs = *regs;
+       childregs->a4 = 0; /* child sees fork() return 0 */
+
+       if (usp == -1)
+               /* case of  __kernel_thread: we return to supervisor space */
+               childregs->sp = (unsigned long)(childregs + 1);
+       else
+               /* Otherwise use the given stack */
+               childregs->sp = usp;
+
+       /* Set usp/ksp */
+       p->thread.usp = childregs->sp;
+       /* switch_to uses stack to save/restore 14 callee-saved regs */
+       thread_saved_ksp(p) = (unsigned long)childregs - 8;
+       p->thread.pc = (unsigned int) ret_from_fork;
+       p->thread.wchan = (unsigned long) ret_from_fork;
+#ifdef __DSBT__
+       {
+               /* propagate the parent's data-segment pointer (B14) so
+                * DSBT code in the child resolves correctly */
+               unsigned long dp;
+
+               asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));
+
+               thread_saved_dp(p) = dp;
+               if (usp == -1)
+                       childregs->dp = dp;
+       }
+#endif
+       return 0;
+}
+
+/*
+ * c6x_execve() executes a new program.
+ *
+ * Standard getname()/do_execve()/putname() sequence; returns 0 on
+ * success or a negative errno (e.g. from a faulting pathname copy).
+ */
+SYSCALL_DEFINE4(c6x_execve, const char __user *, name,
+               const char __user *const __user *, argv,
+               const char __user *const __user *, envp,
+               struct pt_regs *, regs)
+{
+       int error;
+       char *filename;
+
+       filename = getname(name);
+       error = PTR_ERR(filename);
+       if (IS_ERR(filename))
+               goto out;
+
+       error = do_execve(filename, argv, envp, regs);
+       putname(filename);
+out:
+       return error;
+}
+
+/* Report where a sleeping task is waiting: just the cached value that
+ * copy_thread() stored (ret_from_fork), not a real stack unwind. */
+unsigned long get_wchan(struct task_struct *p)
+{
+       return p->thread.wchan;
+}
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
new file mode 100644 (file)
index 0000000..3c494e8
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
+
+#include <asm/cacheflush.h>
+
+/* NOTE(review): PT_REG_SIZE is not referenced in this file as posted */
+#define PT_REG_SIZE      (sizeof(struct pt_regs))
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+       /* nothing to do */
+}
+
+/*
+ * Get a register number from live pt_regs for the specified task.
+ *
+ * pt_regs is treated as a flat array of longs indexed by regno.
+ * TSR and CSR are not exposed to the tracer and always read as 0.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+       long *addr = (long *)task_pt_regs(task);
+
+       if (regno == PT_TSR || regno == PT_CSR)
+               return 0;
+
+       return addr[regno];
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ *
+ * Writes to TSR and CSR are silently ignored (privileged state);
+ * always returns 0.
+ */
+static inline int put_reg(struct task_struct *task,
+                         int regno,
+                         unsigned long data)
+{
+       unsigned long *addr = (unsigned long *)task_pt_regs(task);
+
+       if (regno != PT_TSR && regno != PT_CSR)
+               addr[regno] = data;
+
+       return 0;
+}
+
+/* regset get/set implementations */
+
+/*
+ * Export the task's entire pt_regs as the general-purpose regset.
+ */
+static int gpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  void *kbuf, void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  regs,
+                                  0, sizeof(*regs));
+}
+
+/*
+ * Write the general-purpose regset from a tracer-supplied buffer into
+ * the task's pt_regs.  The words at the TSR and CSR positions are
+ * privileged state and are skipped (copyin_ignore) rather than written.
+ *
+ * Bug fix: the original passed '&regs' — the address of the local
+ * pointer variable — as the destination for user_regset_copyin(), so
+ * incoming data overwrote the pointer itself (and the stack beyond it)
+ * instead of landing in pt_regs.  Pass 'regs', matching gpr_get().
+ */
+static int gpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       int ret;
+       struct pt_regs *regs = task_pt_regs(target);
+
+       /* Don't copyin TSR or CSR */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs,
+                                0, PT_TSR * sizeof(long));
+       if (ret)
+               return ret;
+
+       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                       PT_TSR * sizeof(long),
+                                       (PT_TSR + 1) * sizeof(long));
+       if (ret)
+               return ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs,
+                                (PT_TSR + 1) * sizeof(long),
+                                PT_CSR * sizeof(long));
+       if (ret)
+               return ret;
+
+       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                       PT_CSR * sizeof(long),
+                                       (PT_CSR + 1) * sizeof(long));
+       if (ret)
+               return ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs,
+                                (PT_CSR + 1) * sizeof(long), -1);
+       return ret;
+}
+
+/* Indices into c6x_regsets[] */
+enum c6x_regset {
+       REGSET_GPR,
+};
+
+/* Single regset: the whole pt_regs, exported as NT_PRSTATUS for
+ * core dumps and PTRACE_GETREGSET. */
+static const struct user_regset c6x_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = ELF_NGREG,
+               .size = sizeof(u32),
+               .align = sizeof(u32),
+               .get = gpr_get,
+               .set = gpr_set
+       },
+};
+
+/* The one and only regset view for this architecture */
+static const struct user_regset_view user_c6x_native_view = {
+       .name           = "tic6x",
+       .e_machine      = EM_TI_C6000,
+       .regsets        = c6x_regsets,
+       .n              = ARRAY_SIZE(c6x_regsets),
+};
+
+/* All tasks share the single native regset view defined above. */
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_c6x_native_view;
+}
+
+/*
+ * Perform ptrace request
+ *
+ * Only PTRACE_POKETEXT needs architecture-specific handling: after a
+ * successful text poke the icache range covering the 4-byte word must
+ * be flushed so the core fetches the modified instruction.  Everything
+ * else is delegated to the generic ptrace_request().
+ */
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
+{
+       int ret = 0;
+
+       switch (request) {
+               /*
+                * write the word at location addr.
+                */
+       case PTRACE_POKETEXT:
+               ret = generic_ptrace_pokedata(child, addr, data);
+               /* 'request' is necessarily PTRACE_POKETEXT in this arm,
+                * so the original re-check of it was redundant */
+               if (ret == 0)
+                       flush_icache_range(addr, addr + 4);
+               break;
+       default:
+               ret = ptrace_request(child, request, addr, data);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * handle tracing of system call entry
+ * - return the revised system call number or ULONG_MAX to cause ENOSYS
+ *
+ * NOTE(review): on the success path this returns regs->b0 — presumably
+ * B0 holds the (possibly tracer-rewritten) syscall number saved by the
+ * entry code; confirm against the syscall entry assembly.
+ */
+asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
+{
+       if (tracehook_report_syscall_entry(regs))
+               /* tracing decided this syscall should not happen, so
+                * We'll return a bogus call number to get an ENOSYS
+                * error, but leave the original number in
+                * regs->orig_a4
+                */
+               return ULONG_MAX;
+
+       return regs->b0;
+}
+
+/*
+ * handle tracing of system call exit
+ * (step flag 0: single-step reporting is not used here)
+ */
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+       tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
new file mode 100644 (file)
index 0000000..0c07921
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/clkdev.h>
+#include <linux/initrd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+
+
+#include <asm/sections.h>
+#include <asm/div64.h>
+#include <asm/setup.h>
+#include <asm/dscr.h>
+#include <asm/clock.h>
+#include <asm/soc.h>
+
+/* SoC model string, parsed from the device tree in get_cpuinfo() */
+static const char *c6x_soc_name;
+
+int c6x_num_cores;
+EXPORT_SYMBOL_GPL(c6x_num_cores);
+
+unsigned int c6x_silicon_rev;
+EXPORT_SYMBOL_GPL(c6x_silicon_rev);
+
+/*
+ * Device status register. This holds information
+ * about device configuration needed by some drivers.
+ */
+unsigned int c6x_devstat;
+EXPORT_SYMBOL_GPL(c6x_devstat);
+
+/*
+ * Some SoCs have fuse registers holding a unique MAC
+ * address. This is parsed out of the device tree with
+ * the resulting MAC being held here.
+ */
+unsigned char c6x_fuse_mac[6];
+
+/* usable kernel memory range, derived in setup_arch() */
+unsigned long memory_start;
+unsigned long memory_end;
+
+/* raw RAM bank boundaries, set by c6x_add_memory() */
+unsigned long ram_start;
+unsigned long ram_end;
+
+/* Uncached memory for DMA consistent use (memdma=) */
+static unsigned long dma_start __initdata;
+static unsigned long dma_size __initdata;
+
+char c6x_command_line[COMMAND_LINE_SIZE];
+
+#if defined(CONFIG_CMDLINE_BOOL)
+static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) =
+       CONFIG_CMDLINE;
+#endif
+
+/* Per-CPU identification filled in by get_cpuinfo() and shown in
+ * /proc/cpuinfo; __cpu_rev is backing storage for cpu_rev when the
+ * revision string has to be formatted at runtime. */
+struct cpuinfo_c6x {
+       const char *cpu_name;
+       const char *cpu_voltage;
+       const char *mmu;
+       const char *fpu;
+       char *cpu_rev;
+       unsigned int core_id;
+       char __cpu_rev[5];
+};
+
+static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);
+
+/* scaled core-clock ticks per nanosecond, for delay loops */
+unsigned int ticks_per_ns_scaled;
+EXPORT_SYMBOL(ticks_per_ns_scaled);
+
+unsigned int c6x_core_freq;
+
+/*
+ * Identify the CPU and fill in the boot CPU's cpuinfo_c6x record.
+ *
+ * Reads the core clock (falling back to 700MHz), derives the
+ * ticks_per_ns_scaled delay constant, decodes CPU/revision IDs from
+ * CSR, and counts cores / reads the SoC model from the device tree.
+ */
+static void __init get_cpuinfo(void)
+{
+       unsigned cpu_id, rev_id, csr;
+       struct clk *coreclk = clk_get_sys(NULL, "core");
+       unsigned long core_khz;
+       u64 tmp;
+       struct cpuinfo_c6x *p;
+       struct device_node *node, *np;
+
+       p = &per_cpu(cpu_data, smp_processor_id());
+
+       if (!IS_ERR(coreclk))
+               c6x_core_freq = clk_get_rate(coreclk);
+       else {
+               printk(KERN_WARNING
+                      "Cannot find core clock frequency. Using 700MHz\n");
+               c6x_core_freq = 700000000;
+       }
+
+       core_khz = c6x_core_freq / 1000;
+
+       /* ticks/ns scaled by 2^C6X_NDELAY_SCALE to keep precision */
+       tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
+       do_div(tmp, 1000000);
+       ticks_per_ns_scaled = tmp;
+
+       /* CSR[31:24] = CPU ID, CSR[23:16] = silicon revision */
+       csr = get_creg(CSR);
+       cpu_id = csr >> 24;
+       rev_id = (csr >> 16) & 0xff;
+
+       p->mmu = "none";
+       p->fpu = "none";
+       p->cpu_voltage = "unknown";
+
+       switch (cpu_id) {
+       case 0:
+               p->cpu_name = "C67x";
+               p->fpu = "yes";
+               break;
+       case 2:
+               p->cpu_name = "C62x";
+               break;
+       case 8:
+               p->cpu_name = "C64x";
+               break;
+       case 12:
+               p->cpu_name = "C64x";
+               break;
+       case 16:
+               p->cpu_name = "C64x+";
+               p->cpu_voltage = "1.2";
+               break;
+       default:
+               p->cpu_name = "unknown";
+               break;
+       }
+
+       if (cpu_id < 16) {
+               switch (rev_id) {
+               case 0x1:
+                       if (cpu_id > 8) {
+                               p->cpu_rev = "DM640/DM641/DM642/DM643";
+                               p->cpu_voltage = "1.2 - 1.4";
+                       } else {
+                               p->cpu_rev = "C6201";
+                               p->cpu_voltage = "2.5";
+                       }
+                       break;
+               case 0x2:
+                       p->cpu_rev = "C6201B/C6202/C6211";
+                       p->cpu_voltage = "1.8";
+                       break;
+               case 0x3:
+                       p->cpu_rev = "C6202B/C6203/C6204/C6205";
+                       p->cpu_voltage = "1.5";
+                       break;
+               case 0x201:
+                       p->cpu_rev = "C6701 revision 0 (early CPU)";
+                       p->cpu_voltage = "1.8";
+                       break;
+               case 0x202:
+                       p->cpu_rev = "C6701/C6711/C6712";
+                       p->cpu_voltage = "1.8";
+                       break;
+               case 0x801:
+                       p->cpu_rev = "C64x";
+                       p->cpu_voltage = "1.5";
+                       break;
+               default:
+                       p->cpu_rev = "unknown";
+               }
+       } else {
+               /* NOTE(review): this formats cpu_id, not rev_id, as the
+                * "revision" — looks intentional for newer parts but
+                * worth confirming */
+               p->cpu_rev = p->__cpu_rev;
+               snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
+       }
+
+       p->core_id = get_coreid();
+
+       /* count "cpu" children of /cpus to learn the core count */
+       node = of_find_node_by_name(NULL, "cpus");
+       if (node) {
+               for_each_child_of_node(node, np)
+                       if (!strcmp("cpu", np->name))
+                               ++c6x_num_cores;
+               of_node_put(node);
+       }
+
+       node = of_find_node_by_name(NULL, "soc");
+       if (node) {
+               if (of_property_read_string(node, "model", &c6x_soc_name))
+                       c6x_soc_name = "unknown";
+               of_node_put(node);
+       } else
+               c6x_soc_name = "unknown";
+
+       printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
+              p->core_id, p->cpu_name, p->cpu_rev,
+              p->cpu_voltage, c6x_core_freq / 1000000);
+}
+
+/*
+ * Early parsing of the command line
+ */
+static u32 mem_size __initdata;
+
+/* "mem=" parsing: caps usable memory; consumed later by setup_arch() */
+static int __init early_mem(char *p)
+{
+       if (!p)
+               return -EINVAL;
+
+       mem_size = memparse(p, &p);
+       /* don't remove all of memory when handling "mem={invalid}" */
+       if (mem_size == 0)
+               return -EINVAL;
+
+       return 0;
+}
+early_param("mem", early_mem);
+
+/*
+ * "memdma=<size>[@<address>]" parsing.
+ *
+ * Bug fix: when the optional "@<address>" suffix is present, parsing
+ * must start after the '@'.  The original passed 'p' — still pointing
+ * at '@' — to memparse(), which parses no digits and returns 0, so an
+ * explicit DMA start address was always silently ignored.
+ */
+static int __init early_memdma(char *p)
+{
+       if (!p)
+               return -EINVAL;
+
+       dma_size = memparse(p, &p);
+       if (*p == '@')
+               dma_start = memparse(p + 1, &p);
+
+       return 0;
+}
+early_param("memdma", early_memdma);
+
+/*
+ * Register a RAM bank discovered at early boot.
+ *
+ * Only the single bank that contains PAGE_OFFSET is accepted (the
+ * condition rejects banks with start > PAGE_OFFSET or that end at or
+ * before it); subsequent banks return -EINVAL, non-matching ones 0.
+ */
+int __init c6x_add_memory(phys_addr_t start, unsigned long size)
+{
+       static int ram_found __initdata;
+
+       /* We only handle one bank (the one with PAGE_OFFSET) for now */
+       if (ram_found)
+               return -EINVAL;
+
+       if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
+               return 0;
+
+       ram_start = start;
+       ram_end = start + size;
+
+       ram_found = 1;
+       return 0;
+}
+
+/*
+ * Do early machine setup and device tree parsing. This is called very
+ * early on the boot process.
+ *
+ * @dt_ptr: physical address of a device tree blob handed over by the
+ *          bootloader; if absent or identical to the built-in blob,
+ *          the linked-in _fdt_start copy is used instead.
+ */
+notrace void __init machine_init(unsigned long dt_ptr)
+{
+       struct boot_param_header *dtb = __va(dt_ptr);
+       struct boot_param_header *fdt = (struct boot_param_header *)_fdt_start;
+
+       /* interrupts must be masked */
+       set_creg(IER, 2);
+
+       /*
+        * Set the Interrupt Service Table (IST) to the beginning of the
+        * vector table.
+        */
+       set_ist(_vectors_start);
+
+       lockdep_init();
+
+       /*
+        * dtb is passed in from bootloader.
+        * fdt is linked in blob.
+        */
+       if (dtb && dtb != fdt)
+               fdt = dtb;
+
+       /* Do some early initialization based on the flat device tree */
+       early_init_devtree(fdt);
+
+       /* parse_early_param needs a boot_command_line */
+       strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE);
+       parse_early_param();
+}
+
+/*
+ * Main architecture setup, called once from start_kernel().
+ *
+ * Order matters here: memory limits are computed (honouring mem= and
+ * memdma=), memblock regions are added/reserved, the bootmem allocator
+ * is seeded, caches are configured for RAM/initrd/DMA regions, the
+ * coherent DMA allocator is initialized, paging is brought up, and
+ * finally clocks and CPU identification are probed.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       int bootmap_size;
+       struct memblock_region *reg;
+
+       printk(KERN_INFO "Initializing kernel\n");
+
+       /* Initialize command line */
+       *cmdline_p = c6x_command_line;
+
+       memory_end = ram_end;
+       memory_end &= ~(PAGE_SIZE - 1);
+
+       /* honour a "mem=" cap if it shrinks the usable range */
+       if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
+               memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);
+
+       /* add block that this kernel can use */
+       memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+       /* reserve kernel text/data/bss */
+       memblock_reserve(PAGE_OFFSET,
+                        PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));
+
+       if (dma_size) {
+               /* align to cacheability granularity */
+               dma_size = CACHE_REGION_END(dma_size);
+
+               /* default: carve the DMA region from the top of memory */
+               if (!dma_start)
+                       dma_start = memory_end - dma_size;
+
+               /* align to cacheability granularity */
+               dma_start = CACHE_REGION_START(dma_start);
+
+               /* reserve DMA memory taken from kernel memory */
+               if (memblock_is_region_memory(dma_start, dma_size))
+                       memblock_reserve(dma_start, dma_size);
+       }
+
+       memory_start = PAGE_ALIGN((unsigned int) &_end);
+
+       printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
+              memory_start, memory_end);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * Reserve initrd memory if in kernel memory.
+        */
+       if (initrd_start < initrd_end)
+               if (memblock_is_region_memory(initrd_start,
+                                             initrd_end - initrd_start))
+                       memblock_reserve(initrd_start,
+                                        initrd_end - initrd_start);
+#endif
+
+       init_mm.start_code = (unsigned long) &_stext;
+       init_mm.end_code   = (unsigned long) &_etext;
+       init_mm.end_data   = memory_start;
+       init_mm.brk        = memory_start;
+
+       /*
+        * Give all the memory to the bootmap allocator,  tell it to put the
+        * boot mem_map at the start of memory
+        */
+       bootmap_size = init_bootmem_node(NODE_DATA(0),
+                                        memory_start >> PAGE_SHIFT,
+                                        PAGE_OFFSET >> PAGE_SHIFT,
+                                        memory_end >> PAGE_SHIFT);
+       memblock_reserve(memory_start, bootmap_size);
+
+       unflatten_device_tree();
+
+       c6x_cache_init();
+
+       /* Set the whole external memory as non-cacheable */
+       disable_caching(ram_start, ram_end - 1);
+
+       /* Set caching of external RAM used by Linux */
+       for_each_memblock(memory, reg)
+               enable_caching(CACHE_REGION_START(reg->base),
+                              CACHE_REGION_START(reg->base + reg->size - 1));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * Enable caching for initrd which falls outside kernel memory.
+        */
+       if (initrd_start < initrd_end) {
+               if (!memblock_is_region_memory(initrd_start,
+                                              initrd_end - initrd_start))
+                       enable_caching(CACHE_REGION_START(initrd_start),
+                                      CACHE_REGION_START(initrd_end - 1));
+       }
+#endif
+
+       /*
+        * Disable caching for dma coherent memory taken from kernel memory.
+        */
+       if (dma_size && memblock_is_region_memory(dma_start, dma_size))
+               disable_caching(dma_start,
+                               CACHE_REGION_START(dma_start + dma_size - 1));
+
+       /* Initialize the coherent memory allocator */
+       coherent_mem_init(dma_start, dma_size);
+
+       /*
+        * Free all memory as a starting point.
+        */
+       free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+       /*
+        * Then reserve memory which is already being used.
+        */
+       for_each_memblock(reserved, reg) {
+               pr_debug("reserved - 0x%08x-0x%08x\n",
+                        (u32) reg->base, (u32) reg->size);
+               reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+       }
+
+       max_low_pfn = PFN_DOWN(memory_end);
+       min_low_pfn = PFN_UP(memory_start);
+       max_mapnr = max_low_pfn - min_low_pfn;
+
+       /* Get kmalloc into gear */
+       paging_init();
+
+       /*
+        * Probe for Device State Configuration Registers.
+        * We have to do this early in case timer needs to be enabled
+        * through DSCR.
+        */
+       dscr_probe();
+
+       /* We do this early for timer and core clock frequency */
+       c64x_setup_clocks();
+
+       /* Get CPU info */
+       get_cpuinfo();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+       conswitchp = &dummy_con;
+#endif
+}
+
+/* Map a CPU number to/from an opaque seq_file iterator cookie.  The
+ * +1 bias keeps CPU 0 from becoming a NULL cookie, which seq_file
+ * would treat as end-of-sequence. */
+#define cpu_to_ptr(n) ((void *)((long)(n)+1))
+#define ptr_to_cpu(p) ((long)(p) - 1)
+
+/*
+ * /proc/cpuinfo show callback: SoC-wide fields are printed once (for
+ * CPU 0), followed by the per-CPU identification block.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       int n = ptr_to_cpu(v);
+       struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
+
+       if (n == 0) {
+               seq_printf(m,
+                          "soc\t\t: %s\n"
+                          "soc revision\t: 0x%x\n"
+                          "soc cores\t: %d\n",
+                          c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
+       }
+
+       seq_printf(m,
+                  "\n"
+                  "processor\t: %d\n"
+                  "cpu\t\t: %s\n"
+                  "core revision\t: %s\n"
+                  "core voltage\t: %s\n"
+                  "core id\t\t: %d\n"
+                  "mmu\t\t: %s\n"
+                  "fpu\t\t: %s\n"
+                  "cpu MHz\t\t: %u\n"
+                  "bogomips\t: %lu.%02lu\n\n",
+                  n,
+                  p->cpu_name, p->cpu_rev, p->cpu_voltage,
+                  p->core_id, p->mmu, p->fpu,
+                  (c6x_core_freq + 500000) / 1000000,
+                  (loops_per_jiffy/(500000/HZ)),
+                  (loops_per_jiffy/(5000/HZ))%100);
+
+       return 0;
+}
+
+/* seq_file iterator over CPU numbers 0..nr_cpu_ids-1; c_next always
+ * returns NULL so each show covers one CPU per iteration step. */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/*
+ * /proc/cpuinfo operations.  Use C99 designated initializers (the
+ * kernel convention) so the bindings cannot silently break if the
+ * member order of struct seq_operations ever changes.
+ */
+const struct seq_operations cpuinfo_op = {
+       .start  = c_start,
+       .stop   = c_stop,
+       .next   = c_next,
+       .show   = show_cpuinfo,
+};
+
+/* sysfs CPU device objects, one per possible CPU */
+static struct cpu cpu_devices[NR_CPUS];
+
+/* Register every present CPU with the sysfs topology layer. */
+static int __init topology_init(void)
+{
+       int i;
+
+       for_each_present_cpu(i)
+               register_cpu(&cpu_devices[i], i);
+
+       return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
new file mode 100644 (file)
index 0000000..304f675
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+
+/* All signals may be masked except SIGKILL and SIGSTOP. */
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Do a signal return, undo the signal stack.
+ */
+
+/* Room for the 9 32-bit opcodes written into retcode by setup_rt_frame(). */
+#define RETCODE_SIZE (9 << 2)  /* 9 instructions = 36 bytes */
+
+/* Frame built on the user stack for rt signal delivery. */
+struct rt_sigframe {
+       struct siginfo __user *pinfo;   /* set to &info below */
+       void __user *puc;               /* set to &uc below */
+       struct siginfo info;
+       struct ucontext uc;
+       unsigned long retcode[RETCODE_SIZE >> 2]; /* sigreturn trampoline */
+};
+
+/*
+ * Copy the saved register image from the user-space sigcontext @sc back
+ * into @regs.  Returns non-zero if any user-space fetch faulted.
+ */
+static int restore_sigcontext(struct pt_regs *regs,
+                             struct sigcontext __user *sc)
+{
+       int err = 0;
+
+       /* The access_ok check was done by caller, so use __get_user here */
+#define COPY(x)  (err |= __get_user(regs->x, &sc->sc_##x))
+
+       COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+       COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+       COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+       COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+       COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+       COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+       COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+       COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+       COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+       COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+       COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+       COPY(csr); COPY(pc);
+
+#undef COPY
+
+       return err;
+}
+
+/*
+ * rt_sigreturn: restore the signal mask and register state saved in the
+ * rt_sigframe that setup_rt_frame() placed on the user stack.  Returns
+ * the restored a4 (syscall return register); sends SIGSEGV and returns 0
+ * if the frame is misaligned or unreadable.
+ */
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       sigset_t set;
+
+       /*
+        * Since we stacked the signal on a dword boundary,
+        * 'sp' should be dword aligned here.  If it's
+        * not, then the user is trying to mess with us.
+        */
+       if (regs->sp & 7)
+               goto badframe;
+
+       /* undo the "frame - 8" adjustment made when the frame was set up */
+       frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8);
+
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       /* never let the user re-block SIGKILL/SIGSTOP */
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+               goto badframe;
+
+       return regs->a4;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
+
+/*
+ * Save the current register image in @regs plus the blocked-signal
+ * @mask into the user-space sigcontext @sc.  Returns non-zero if any
+ * user-space store faulted.
+ */
+static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+                           unsigned long mask)
+{
+       int err = 0;
+
+       err |= __put_user(mask, &sc->sc_mask);
+
+       /* The access_ok check was done by caller, so use __put_user here */
+#define COPY(x) (err |= __put_user(regs->x, &sc->sc_##x))
+
+       COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+       COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+       COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+       COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+       COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+       COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+       COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+       COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+       COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+       COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+       COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+       COPY(csr); COPY(pc);
+
+#undef COPY
+
+       return err;
+}
+
+/*
+ * Pick the user stack address for a new signal frame of @framesize
+ * bytes: the alternate signal stack if SA_ONSTACK applies and we are
+ * not already on it, otherwise the current sp.  Result is dword-aligned.
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+                                       struct pt_regs *regs,
+                                       unsigned long framesize)
+{
+       unsigned long sp = regs->sp;
+
+       /*
+        * This is the X/Open sanctioned signal stack switching.
+        */
+       if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
+               sp = current->sas_ss_sp + current->sas_ss_size;
+
+       /*
+        * No matter what happens, 'sp' must be dword
+        * aligned. Otherwise, nasty things will happen
+        */
+       return (void __user *)((sp - framesize) & ~7);
+}
+
+/*
+ * Build an rt_sigframe on the user stack, including a 9-instruction
+ * trampoline that issues the rt_sigreturn syscall, then redirect the
+ * user context (sp, b3, pc and the a4/b4/a6 argument registers) so the
+ * handler runs on return to user space.  Returns 0 on success or
+ * -EFAULT after forcing SIGSEGV.
+ */
+static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
+                          sigset_t *set, struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       unsigned long __user *retcode;
+       int err = 0;
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto segv_and_exit;
+
+       err |= __put_user(&frame->info, &frame->pinfo);
+       err |= __put_user(&frame->uc, &frame->puc);
+       err |= copy_siginfo_to_user(&frame->info, info);
+
+       /* Clear all the bits of the ucontext we don't use.  */
+       err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+
+       err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+       /* Set up to return from userspace */
+       retcode = (unsigned long __user *) &frame->retcode;
+
+       /* The access_ok check was done above, so use __put_user here */
+#define COPY(x) (err |= __put_user(x, retcode++))
+
+       COPY(0x0000002AUL | (__NR_rt_sigreturn << 7));
+                               /* MVK __NR_rt_sigreturn,B0 */
+       COPY(0x10000000UL);     /* SWE */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+       COPY(0x00006000UL);     /* NOP 4 */
+
+#undef COPY
+
+       if (err)
+               goto segv_and_exit;
+
+       /* trampoline is executed as code: keep I-cache coherent with it */
+       flush_icache_range((unsigned long) &frame->retcode,
+                          (unsigned long) &frame->retcode + RETCODE_SIZE);
+
+       retcode = (unsigned long __user *) &frame->retcode;
+
+       /* Change user context to branch to signal handler */
+       regs->sp = (unsigned long) frame - 8;
+       regs->b3 = (unsigned long) retcode;     /* return address -> trampoline */
+       regs->pc = (unsigned long) ka->sa.sa_handler;
+
+       /* Give the signal number to the handler */
+       regs->a4 = signr;
+
+       /*
+        * For realtime signals we must also set the second and third
+        * arguments for the signal handler.
+        *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
+        */
+       regs->b4 = (unsigned long)&frame->info;
+       regs->a6 = (unsigned long)&frame->uc;
+
+       return 0;
+
+segv_and_exit:
+       force_sigsegv(signr, current);
+       return -EFAULT;
+}
+
+/*
+ * Rewrite a4/pc so an interrupted syscall either restarts (back up pc
+ * by one 4-byte instruction and restore the original a4 argument) or
+ * fails with -EINTR, per the usual restart rules.
+ * NOTE(review): not referenced anywhere in this file; handle_signal()
+ * open-codes the same logic -- confirm whether this helper is needed.
+ */
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+       switch (regs->a4) {
+       case -ERESTARTNOHAND:
+               if (!has_handler)
+                       goto do_restart;
+               regs->a4 = -EINTR;
+               break;
+
+       case -ERESTARTSYS:
+               if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+                       regs->a4 = -EINTR;
+                       break;
+               }
+       /* fallthrough */
+       case -ERESTARTNOINTR:
+do_restart:
+               regs->a4 = regs->orig_a4;
+               regs->pc -= 4;
+               break;
+       }
+}
+
+/*
+ * handle the actual delivery of a signal to userspace
+ *
+ * Applies syscall-restart fixups when @syscall is set, builds the
+ * signal frame, and on success blocks the handled signal (plus
+ * sa_mask) in current->blocked.  Returns setup_rt_frame()'s result.
+ */
+static int handle_signal(int sig,
+                        siginfo_t *info, struct k_sigaction *ka,
+                        sigset_t *oldset, struct pt_regs *regs,
+                        int syscall)
+{
+       int ret;
+
+       /* Are we from a system call? */
+       if (syscall) {
+               /* If so, check system call restarting.. */
+               switch (regs->a4) {
+               case -ERESTART_RESTARTBLOCK:
+               case -ERESTARTNOHAND:
+                       regs->a4 = -EINTR;
+                       break;
+
+               case -ERESTARTSYS:
+                       if (!(ka->sa.sa_flags & SA_RESTART)) {
+                               regs->a4 = -EINTR;
+                               break;
+                       }
+
+                       /* fallthrough */
+               case -ERESTARTNOINTR:
+                       /* back up pc one instruction to re-issue the syscall */
+                       regs->a4 = regs->orig_a4;
+                       regs->pc -= 4;
+               }
+       }
+
+       /* Set up the stack frame */
+       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+       if (ret == 0) {
+               spin_lock_irq(&current->sighand->siglock);
+               sigorsets(&current->blocked, &current->blocked,
+                         &ka->sa.sa_mask);
+               if (!(ka->sa.sa_flags & SA_NODEFER))
+                       sigaddset(&current->blocked, sig);
+               recalc_sigpending();
+               spin_unlock_irq(&current->sighand->siglock);
+       }
+
+       return ret;
+}
+
+/*
+ * handle a potential signal
+ *
+ * Dequeues one pending signal and delivers it; if none is pending,
+ * applies syscall-restart fixups and restores any saved sigmask.
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+       struct k_sigaction ka;
+       siginfo_t info;
+       sigset_t *oldset;
+       int signr;
+
+       /* we want the common case to go fast, which is why we may in certain
+        * cases get here from kernel mode */
+       if (!user_mode(regs))
+               return;
+
+       /* the mask saved for sigsuspend-style calls takes precedence */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
+               oldset = &current->blocked;
+
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+       if (signr > 0) {
+               if (handle_signal(signr, &info, &ka, oldset,
+                                 regs, syscall) == 0) {
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+                       tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               }
+
+               return;
+       }
+
+       /* did we come from a system call? */
+       if (syscall) {
+               /* restart the system call - no handlers present */
+               switch (regs->a4) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+                       regs->a4 = regs->orig_a4;
+                       regs->pc -= 4;
+                       break;
+
+               case -ERESTART_RESTARTBLOCK:
+                       /* re-enter via the restart_syscall mechanism */
+                       regs->a4 = regs->orig_a4;
+                       regs->b0 = __NR_restart_syscall;
+                       regs->pc -= 4;
+                       break;
+               }
+       }
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ *
+ * Dispatches pending signal delivery and TIF_NOTIFY_RESUME work on the
+ * way back to user space.
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
+                                int syscall)
+{
+       /* deal with pending signal delivery */
+       if (thread_info_flags & ((1 << TIF_SIGPENDING) |
+                                (1 << TIF_RESTORE_SIGMASK)))
+               do_signal(regs, syscall);
+
+       if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(regs);
+               if (current->replacement_session_keyring)
+                       key_replace_session_keyring();
+       }
+}
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
new file mode 100644 (file)
index 0000000..dd45bc3
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ *  Miscellaneous SoC-specific hooks.
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <asm/system.h>
+#include <asm/setup.h>
+#include <asm/soc.h>
+
+/* Hook table filled in by SoC-specific setup code; entries may be NULL. */
+struct soc_ops soc_ops;
+
+/*
+ * Fetch the pending exception number via the SoC hook.
+ * Returns -1 when no hook is installed.
+ */
+int soc_get_exception(void)
+{
+       if (!soc_ops.get_exception)
+               return -1;
+       return soc_ops.get_exception();
+}
+
+/* Raise event @evt through the SoC hook; no-op when none is installed. */
+void soc_assert_event(unsigned int evt)
+{
+       if (!soc_ops.assert_event)
+               return;
+
+       soc_ops.assert_event(evt);
+}
+
+/* MAC address parsed from the "emac_addr=" boot argument (all-zero if unset). */
+static u8 cmdline_mac[6];
+
+/*
+ * Parse "emac_addr=xx:xx:xx:xx:xx:xx" (hex octets, colon separated)
+ * into cmdline_mac.  Returns 1 when the argument was consumed, 0 on a
+ * malformed string (cmdline_mac may then be partially written).
+ */
+static int __init get_mac_addr_from_cmdline(char *str)
+{
+       int count, i, val;
+
+       for (count = 0; count < 6 && *str; count++, str += 3) {
+               if (!isxdigit(str[0]) || !isxdigit(str[1]))
+                       return 0;
+               /* octets are ':' separated; the last must end the string */
+               if (str[2] != ((count < 5) ? ':' : '\0'))
+                       return 0;
+
+               for (i = 0, val = 0; i < 2; i++) {
+                       val = val << 4;
+                       val |= isdigit(str[i]) ?
+                               str[i] - '0' : toupper(str[i]) - 'A' + 10;
+               }
+               cmdline_mac[count] = val;
+       }
+       return 1;
+}
+__setup("emac_addr=", get_mac_addr_from_cmdline);
+
+/*
+ * Setup the MAC address for SoC ethernet devices.
+ *
+ * Before calling this function, the ethernet driver will have
+ * initialized the addr with local-mac-address from the device
+ * tree (if found). Allow command line to override, but not
+ * the fused address.
+ *
+ * Precedence: command line > device tree > fuse > random.
+ * Always returns 1 (address available).
+ */
+int soc_mac_addr(unsigned int index, u8 *addr)
+{
+       int i, have_dt_mac = 0, have_cmdline_mac = 0, have_fuse_mac = 0;
+
+       /* a source counts as present if any of its six octets is non-zero */
+       for (i = 0; i < 6; i++) {
+               if (cmdline_mac[i])
+                       have_cmdline_mac = 1;
+               if (c6x_fuse_mac[i])
+                       have_fuse_mac = 1;
+               if (addr[i])
+                       have_dt_mac = 1;
+       }
+
+       /* cmdline overrides all */
+       if (have_cmdline_mac)
+               memcpy(addr, cmdline_mac, 6);
+       else if (!have_dt_mac) {
+               if (have_fuse_mac)
+                       memcpy(addr, c6x_fuse_mac, 6);
+               else
+                       random_ether_addr(addr);
+       }
+
+       /* adjust for specific EMAC device */
+       /* NOTE(review): addr[5] addition can wrap without carrying into
+        * addr[4] for large index * core counts -- confirm acceptable */
+       addr[5] += index * c6x_num_cores;
+       return 1;
+}
+EXPORT_SYMBOL_GPL(soc_mac_addr);
diff --git a/arch/c6x/kernel/switch_to.S b/arch/c6x/kernel/switch_to.S
new file mode 100644 (file)
index 0000000..09177ed
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter (msalter@redhat.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+; B15 is the C6x stack pointer by ABI convention
+#define SP     B15
+
+       /*
+        * void __switch_to(struct thread_info *prev,
+        *                  struct thread_info *next,
+        *                  struct task_struct *tsk) ;
+        *
+        * Saves prev's callee-saved registers, pc and ILC/RILC loop
+        * registers into its thread struct, loads next's, switches SP
+        * (and updates current_ksp), then branches to next's saved pc.
+        * "||" marks instructions issued in parallel with the previous one.
+        */
+ENTRY(__switch_to)
+       LDDW    .D2T2   *+B4(THREAD_B15_14),B7:B6
+ ||    MV      .L2X    A4,B5   ; prev
+ ||    MV      .L1X    B4,A5   ; next
+ ||    MVC     .S2     RILC,B1
+
+       STW     .D2T2   B3,*+B5(THREAD_PC)      ; save return address
+ ||    STDW    .D1T1   A13:A12,*+A4(THREAD_A13_12)
+ ||    MVC     .S2     ILC,B0
+
+       LDW     .D2T2   *+B4(THREAD_PC),B3
+ ||    LDDW    .D1T1   *+A5(THREAD_A13_12),A13:A12
+
+       STDW    .D1T1   A11:A10,*+A4(THREAD_A11_10)
+ ||    STDW    .D2T2   B1:B0,*+B5(THREAD_RICL_ICL)
+#ifndef __DSBT__
+ ||    MVKL    .S2     current_ksp,B1
+#endif
+
+       STDW    .D2T2   B15:B14,*+B5(THREAD_B15_14)
+ ||    STDW    .D1T1   A15:A14,*+A4(THREAD_A15_14)
+#ifndef __DSBT__
+ ||    MVKH    .S2     current_ksp,B1
+#endif
+
+       ;; Switch to next SP
+       MV      .S2     B7,SP
+#ifdef __DSBT__
+ ||    STW     .D2T2   B7,*+B14(current_ksp)
+#else
+ ||    STW     .D2T2   B7,*B1
+ ||    MV      .L2     B6,B14
+#endif
+ ||    LDDW    .D1T1   *+A5(THREAD_RICL_ICL),A1:A0
+
+       STDW    .D2T2   B11:B10,*+B5(THREAD_B11_10)
+ ||    LDDW    .D1T1   *+A5(THREAD_A15_14),A15:A14
+
+       STDW    .D2T2   B13:B12,*+B5(THREAD_B13_12)
+ ||    LDDW    .D1T1   *+A5(THREAD_A11_10),A11:A10
+
+       B       .S2     B3              ; return in next E1
+ ||    LDDW    .D2T2   *+B4(THREAD_B13_12),B13:B12
+
+       LDDW    .D2T2   *+B4(THREAD_B11_10),B11:B10
+       NOP
+
+       MV      .L2X    A0,B0
+ ||    MV      .S1     A6,A4   ; return tsk argument
+
+       MVC     .S2     B0,ILC
+ ||    MV      .L2X    A1,B1
+
+       MVC     .S2     B1,RILC
+ENDPROC(__switch_to)
diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c
new file mode 100644 (file)
index 0000000..3e9bdfb
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/syscalls.h>
+
+#ifdef CONFIG_ACCESS_CHECK
+/*
+ * Validate a user-supplied [addr, addr+size) range.  Returns 1 when the
+ * access is permitted (always for zero-size or KERNEL_DS), 0 otherwise.
+ */
+int _access_ok(unsigned long addr, unsigned long size)
+{
+       if (!size)
+               return 1;
+
+       /* reject NULL and ranges that would wrap past 0xffffffff */
+       if (!addr || addr > (0xffffffffUL - (size - 1)))
+               goto _bad_access;
+
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return 1;
+
+       if (memory_start <= addr && (addr + size - 1) < memory_end)
+               return 1;
+
+_bad_access:
+       pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
+                current->pid, addr, size);
+       return 0;
+}
+EXPORT_SYMBOL(_access_ok);
+#endif
+
+/*
+ * sys_cache_sync -- sync caches over given range
+ *
+ * Writes back and invalidates L1D and invalidates L1P for [s, e),
+ * e.g. after user code is written to memory.  Always returns 0.
+ */
+asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
+{
+       L1D_cache_block_writeback_invalidate(s, e);
+       L1P_cache_block_invalidate(s, e);
+
+       return 0;
+}
+
+/* Provide the actual syscall number to call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/*
+ * Use trampolines
+ *
+ * These #defines redirect the generic syscall names expanded from
+ * <asm/unistd.h> below to C6x-specific argument-marshalling wrappers.
+ */
+#define sys_pread64            sys_pread_c6x
+#define sys_pwrite64           sys_pwrite_c6x
+#define sys_truncate64         sys_truncate64_c6x
+#define sys_ftruncate64                sys_ftruncate64_c6x
+#define sys_fadvise64          sys_fadvise64_c6x
+#define sys_fadvise64_64       sys_fadvise64_64_c6x
+#define sys_fallocate          sys_fallocate_c6x
+
+/* Use sys_mmap_pgoff directly */
+#define sys_mmap2 sys_mmap_pgoff
+
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
+/* every slot defaults to ENOSYS; the include overrides implemented ones */
+void *sys_call_table[__NR_syscalls] = {
+       [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
new file mode 100644 (file)
index 0000000..4c9f136
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+#include <asm/timer64.h>
+
+/* cycles -> ns conversion factor for sched_clock(), in 16.16 fixed point */
+static u32 sched_clock_multiplier;
+#define SCHED_CLOCK_SHIFT 16
+
+/* Read the free-running C6x timestamp counter. */
+static cycle_t tsc_read(struct clocksource *cs)
+{
+       return get_cycles();
+}
+
+/* 64-bit TSC-based clocksource, registered at core frequency. */
+static struct clocksource clocksource_tsc = {
+       .name           = "timestamp",
+       .rating         = 300,
+       .read           = tsc_read,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * scheduler clock - returns current time in nanoseconds.
+ *
+ * Converts the cycle counter with the precomputed 16.16 fixed-point
+ * multiplier set up in time_init().
+ */
+u64 sched_clock(void)
+{
+       u64 tsc = get_cycles();
+
+       return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+}
+
+/*
+ * Boot-time clock setup: derive the sched_clock() multiplier from the
+ * core frequency, register the TSC clocksource, start the counter and
+ * probe the timer64 event timer.
+ */
+void time_init(void)
+{
+       /* multiplier = (NSEC_PER_SEC << 16) / core_freq, i.e. ns per cycle */
+       u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+       do_div(tmp, c6x_core_freq);
+       sched_clock_multiplier = tmp;
+
+       clocksource_register_hz(&clocksource_tsc, c6x_core_freq);
+
+       /* write anything into TSCL to enable counting */
+       set_creg(TSCL, 0);
+
+       /* probe for timer64 event timer */
+       timer64_init();
+}
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
new file mode 100644 (file)
index 0000000..f50e3ed
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <linux/bug.h>
+
+#include <asm/soc.h>
+#include <asm/traps.h>
+
+/* Optional platform NMI hook; NULL (zero-initialized) until installed. */
+int (*c6x_nmi_handler)(struct pt_regs *regs);
+
+/* Acknowledge any latched exception flags, then enable exceptions. */
+void __init trap_init(void)
+{
+       static const int ack_types[] = {
+               EXCEPT_TYPE_NXF,
+               EXCEPT_TYPE_EXC,
+               EXCEPT_TYPE_IXF,
+               EXCEPT_TYPE_SXF,
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ack_types); i++)
+               ack_exception(ack_types[i]);
+
+       enable_exception();
+}
+
+/*
+ * Dump the complete saved register file in @regs at KERN_ERR level.
+ * B14/B15 are printed from the dp/sp fields of pt_regs.
+ */
+void show_regs(struct pt_regs *regs)
+{
+       pr_err("\n");
+       pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
+       pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
+       pr_err("A0: %08lx  B0: %08lx\n", regs->a0, regs->b0);
+       pr_err("A1: %08lx  B1: %08lx\n", regs->a1, regs->b1);
+       pr_err("A2: %08lx  B2: %08lx\n", regs->a2, regs->b2);
+       pr_err("A3: %08lx  B3: %08lx\n", regs->a3, regs->b3);
+       pr_err("A4: %08lx  B4: %08lx\n", regs->a4, regs->b4);
+       pr_err("A5: %08lx  B5: %08lx\n", regs->a5, regs->b5);
+       pr_err("A6: %08lx  B6: %08lx\n", regs->a6, regs->b6);
+       pr_err("A7: %08lx  B7: %08lx\n", regs->a7, regs->b7);
+       pr_err("A8: %08lx  B8: %08lx\n", regs->a8, regs->b8);
+       pr_err("A9: %08lx  B9: %08lx\n", regs->a9, regs->b9);
+       pr_err("A10: %08lx  B10: %08lx\n", regs->a10, regs->b10);
+       pr_err("A11: %08lx  B11: %08lx\n", regs->a11, regs->b11);
+       pr_err("A12: %08lx  B12: %08lx\n", regs->a12, regs->b12);
+       pr_err("A13: %08lx  B13: %08lx\n", regs->a13, regs->b13);
+       pr_err("A14: %08lx  B14: %08lx\n", regs->a14, regs->dp);
+       pr_err("A15: %08lx  B15: %08lx\n", regs->a15, regs->sp);
+       pr_err("A16: %08lx  B16: %08lx\n", regs->a16, regs->b16);
+       pr_err("A17: %08lx  B17: %08lx\n", regs->a17, regs->b17);
+       pr_err("A18: %08lx  B18: %08lx\n", regs->a18, regs->b18);
+       pr_err("A19: %08lx  B19: %08lx\n", regs->a19, regs->b19);
+       pr_err("A20: %08lx  B20: %08lx\n", regs->a20, regs->b20);
+       pr_err("A21: %08lx  B21: %08lx\n", regs->a21, regs->b21);
+       pr_err("A22: %08lx  B22: %08lx\n", regs->a22, regs->b22);
+       pr_err("A23: %08lx  B23: %08lx\n", regs->a23, regs->b23);
+       pr_err("A24: %08lx  B24: %08lx\n", regs->a24, regs->b24);
+       pr_err("A25: %08lx  B25: %08lx\n", regs->a25, regs->b25);
+       pr_err("A26: %08lx  B26: %08lx\n", regs->a26, regs->b26);
+       pr_err("A27: %08lx  B27: %08lx\n", regs->a27, regs->b27);
+       pr_err("A28: %08lx  B28: %08lx\n", regs->a28, regs->b28);
+       pr_err("A29: %08lx  B29: %08lx\n", regs->a29, regs->b29);
+       pr_err("A30: %08lx  B30: %08lx\n", regs->a30, regs->b30);
+       pr_err("A31: %08lx  B31: %08lx\n", regs->a31, regs->b31);
+}
+
+/* Show the current task's stack, anchored at a local on this frame. */
+void dump_stack(void)
+{
+       unsigned long stack;
+
+       show_stack(current, &stack);
+}
+EXPORT_SYMBOL(dump_stack);
+
+
+/*
+ * Fatal error: print the cause, register state and stack, then halt
+ * this cpu forever (does not return).
+ */
+void die(char *str, struct pt_regs *fp, int nr)
+{
+       console_verbose();
+       pr_err("%s: %08x\n", str, nr);
+       show_regs(fp);
+
+       pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
+              current->comm, current->pid, (PAGE_SIZE +
+                                            (unsigned long) current));
+
+       dump_stack();
+       /* spin forever; no recovery path on this architecture */
+       while (1)
+               ;
+}
+
+/* die() only for faults taken in kernel mode; user-mode faults return. */
+static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
+{
+       if (!user_mode(fp))
+               die(str, fp, nr);
+}
+
+
+/* Internal exceptions: message + signal/si_code per internal exception
+ * number (entries 0-9; entry 9 catches unknown causes). */
+static struct exception_info iexcept_table[10] = {
+       { "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
+       { "Oops - fetch packet", SIGBUS, BUS_ADRERR },
+       { "Oops - execute packet", SIGILL, ILL_ILLOPC },
+       { "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
+       { "Oops - resource conflict", SIGILL, ILL_ILLOPC },
+       { "Oops - resource access", SIGILL, ILL_PRVREG },
+       { "Oops - privilege", SIGILL, ILL_PRVOPC },
+       { "Oops - loops buffer", SIGILL, ILL_ILLOPC },
+       { "Oops - software exception", SIGILL, ILL_ILLTRP },
+       { "Oops - unknown exception", SIGILL, ILL_ILLOPC }
+};
+
+/* External exceptions */
+static struct exception_info eexcept_table[128] = {
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - external exception", SIGBUS, BUS_ADRERR },
+       { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+       { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+       { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+       { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+       { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+       { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+       { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+       { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+       { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
+};
+
+static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
+{
+       unsigned long addr = instruction_pointer(regs);
+       siginfo_t info;
+
+       if (except_info->code != TRAP_BRKPT)
+               pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
+                      except_info->kernel_str, regs->pc,
+                      except_info->signo, except_info->code);
+
+       die_if_kernel(except_info->kernel_str, regs, addr);
+
+       info.si_signo = except_info->signo;
+       info.si_errno = 0;
+       info.si_code  = except_info->code;
+       info.si_addr  = (void __user *)addr;
+
+       force_sig_info(except_info->signo, &info, current);
+}
+
+/*
+ * Process an internal exception (non maskable)
+ */
+static int process_iexcept(struct pt_regs *regs)
+{
+       unsigned int iexcept_report = get_iexcept();
+       unsigned int iexcept_num;
+
+       ack_exception(EXCEPT_TYPE_IXF);
+
+       pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);
+
+       while (iexcept_report) {
+               iexcept_num = __ffs(iexcept_report);
+               iexcept_report &= ~(1 << iexcept_num);
+               set_iexcept(iexcept_report);
+               if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
+                       /* This is a breakpoint */
+                       struct exception_info bkpt_exception = {
+                               "Oops - undefined instruction",
+                                 SIGTRAP, TRAP_BRKPT
+                       };
+                       do_trap(&bkpt_exception, regs);
+                       iexcept_report &= ~(0xFF);
+                       set_iexcept(iexcept_report);
+                       continue;
+               }
+
+               do_trap(&iexcept_table[iexcept_num], regs);
+       }
+       return 0;
+}
+
+/*
+ * Process an external exception (maskable)
+ */
+static void process_eexcept(struct pt_regs *regs)
+{
+       int evt;
+
+       pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);
+
+       while ((evt = soc_get_exception()) >= 0)
+               do_trap(&eexcept_table[evt], regs);
+
+       ack_exception(EXCEPT_TYPE_EXC);
+}
+
+/*
+ * Main exception processing
+ */
+asmlinkage int process_exception(struct pt_regs *regs)
+{
+       unsigned int type;
+       unsigned int type_num;
+       unsigned int ie_num = 9; /* default is unknown exception */
+
+       while ((type = get_except_type()) != 0) {
+               type_num = fls(type) - 1;
+
+               switch (type_num) {
+               case EXCEPT_TYPE_NXF:
+                       ack_exception(EXCEPT_TYPE_NXF);
+                       if (c6x_nmi_handler)
+                               (c6x_nmi_handler)(regs);
+                       else
+                               pr_alert("NMI interrupt!\n");
+                       break;
+
+               case EXCEPT_TYPE_IXF:
+                       if (process_iexcept(regs))
+                               return 1;
+                       break;
+
+               case EXCEPT_TYPE_EXC:
+                       process_eexcept(regs);
+                       break;
+
+               case EXCEPT_TYPE_SXF:
+                       ie_num = 8;
+               default:
+                       ack_exception(type_num);
+                       do_trap(&iexcept_table[ie_num], regs);
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int kstack_depth_to_print = 48;
+
+static void show_trace(unsigned long *stack, unsigned long *endstack)
+{
+       unsigned long addr;
+       int i;
+
+       pr_debug("Call trace:");
+       i = 0;
+       while (stack + 1 <= endstack) {
+               addr = *stack++;
+               /*
+                * If the address is either in the text segment of the
+                * kernel, or in the region which contains vmalloc'ed
+                * memory, it *may* be the address of a calling
+                * routine; if so, print it so that someone tracing
+                * down the cause of the crash will be able to figure
+                * out the call path that was taken.
+                */
+               if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+                       if (i % 5 == 0)
+                               pr_debug("\n        ");
+#endif
+                       pr_debug(" [<%08lx>]", addr);
+                       print_symbol(" %s\n", addr);
+                       i++;
+               }
+       }
+       pr_debug("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+       unsigned long *p, *endstack;
+       int i;
+
+       if (!stack) {
+               if (task && task != current)
+                       /* We know this is a kernel stack,
+                          so this is the start/end */
+                       stack = (unsigned long *)thread_saved_ksp(task);
+               else
+                       stack = (unsigned long *)&stack;
+       }
+       endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
+                                    & -THREAD_SIZE);
+
+       pr_debug("Stack from %08lx:", (unsigned long)stack);
+       for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
+               if (p + 1 > endstack)
+                       break;
+               if (i % 8 == 0)
+                       pr_cont("\n         ");
+               pr_cont(" %08lx", *p++);
+       }
+       pr_cont("\n");
+       show_trace(stack, endstack);
+}
+
+int is_valid_bugaddr(unsigned long addr)
+{
+       return __kernel_text_address(addr);
+}
diff --git a/arch/c6x/kernel/vectors.S b/arch/c6x/kernel/vectors.S
new file mode 100644 (file)
index 0000000..c95c66f
--- /dev/null
@@ -0,0 +1,81 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+;  This section handles all the interrupt vector routines.
+;  At RESET the processor sets up the DRAM timing parameters and
+;  branches to the label _c_int00 which handles initialization for the C code.
+;
+
+#define ALIGNMENT 5
+
+       .macro IRQVEC name, handler
+       .align ALIGNMENT
+       .hidden \name
+       .global \name
+\name:
+#ifdef CONFIG_C6X_BIG_KERNEL
+       STW     .D2T1   A0,*B15--[2]
+ ||    MVKL    .S1     \handler,A0
+       MVKH    .S1     \handler,A0
+       B       .S2X    A0
+       LDW     .D2T1   *++B15[2],A0
+       NOP     4
+       NOP
+       NOP
+       .endm
+#else /* CONFIG_C6X_BIG_KERNEL */
+       B       .S2     \handler
+       NOP
+       NOP
+       NOP
+       NOP
+       NOP
+       NOP
+       NOP
+       .endm
+#endif /* CONFIG_C6X_BIG_KERNEL */
+
+          .sect ".vectors","ax"
+          .align ALIGNMENT
+          .global RESET
+          .hidden RESET
+RESET:
+#ifdef CONFIG_C6X_BIG_KERNEL
+          MVKL .S1     _c_int00,A0             ; branch to _c_int00
+          MVKH .S1     _c_int00,A0
+          B    .S2X    A0
+#else
+          B    .S2     _c_int00
+          NOP
+          NOP
+#endif
+          NOP
+          NOP
+          NOP
+          NOP
+          NOP
+
+
+          IRQVEC NMI,_nmi_handler              ; NMI interrupt
+          IRQVEC AINT,_bad_interrupt           ; reserved
+          IRQVEC MSGINT,_bad_interrupt         ; reserved
+
+          IRQVEC INT4,_int4_handler
+          IRQVEC INT5,_int5_handler
+          IRQVEC INT6,_int6_handler
+          IRQVEC INT7,_int7_handler
+          IRQVEC INT8,_int8_handler
+          IRQVEC INT9,_int9_handler
+          IRQVEC INT10,_int10_handler
+          IRQVEC INT11,_int11_handler
+          IRQVEC INT12,_int12_handler
+          IRQVEC INT13,_int13_handler
+          IRQVEC INT14,_int14_handler
+          IRQVEC INT15,_int15_handler
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..1d81c4c
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * ld script for the c6x kernel
+ *
+ *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ *  Mark Salter <msalter@redhat.com>
+ */
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+ENTRY(_c_int00)
+
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+#define        READONLY_SEGMENT_START  \
+       . = PAGE_OFFSET;
+#define        READWRITE_SEGMENT_START \
+       . = ALIGN(128);         \
+       _data_lma = .;
+
+SECTIONS
+{
+       /*
+        * Start kernel read only segment
+        */
+       READONLY_SEGMENT_START
+
+       .vectors :
+       {
+               _vectors_start = .;
+               *(.vectors)
+               . = ALIGN(0x400);
+               _vectors_end = .;
+       }
+
+       . = ALIGN(0x1000);
+       .cmdline :
+       {
+               *(.cmdline)
+       }
+
+       /*
+        * This section contains data which may be shared with other
+        * cores. It needs to be a fixed offset from PAGE_OFFSET
+        * regardless of kernel configuration.
+        */
+       .virtio_ipc_dev :
+       {
+               *(.virtio_ipc_dev)
+       }
+
+       . = ALIGN(PAGE_SIZE);
+       .init :
+       {
+               _stext = .;
+               _sinittext = .;
+               HEAD_TEXT
+               INIT_TEXT
+               _einittext = .;
+       }
+
+       __init_begin = _stext;
+       INIT_DATA_SECTION(16)
+
+       PERCPU_SECTION(128)
+
+       . = ALIGN(PAGE_SIZE);
+       __init_end = .;
+
+       .text :
+       {
+               _text = .;
+               TEXT_TEXT
+               SCHED_TEXT
+               LOCK_TEXT
+               IRQENTRY_TEXT
+               KPROBES_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+       }
+
+       EXCEPTION_TABLE(16)
+       NOTES
+
+       RO_DATA_SECTION(PAGE_SIZE)
+       .const :
+       {
+               *(.const .const.* .gnu.linkonce.r.*)
+               *(.switch)
+       }
+
+       . = ALIGN (8) ;
+       __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
+       {
+               _fdt_start = . ;        /* place for fdt blob */
+               *(__fdt_blob) ;         /* Any link-placed DTB */
+               BYTE(0);                /* section always has contents */
+               . = _fdt_start + 0x4000;        /* Pad up to 16kbyte */
+               _fdt_end = . ;
+       }
+
+       _etext = .;
+
+       /*
+        * Start kernel read-write segment.
+        */
+       READWRITE_SEGMENT_START
+       _sdata = .;
+
+       .fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
+       {
+               INIT_TASK_DATA(THREAD_SIZE)
+               NOSAVE_DATA
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+               CACHELINE_ALIGNED_DATA(128)
+               READ_MOSTLY_DATA(128)
+               DATA_DATA
+               CONSTRUCTORS
+               *(.data1)
+               *(.fardata .fardata.*)
+               *(.data.debug_bpt)
+       }
+
+       .neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
+       {
+               *(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
+               *(.neardata .neardata.* .gnu.linkonce.s.*)
+               . = ALIGN(8);
+       }
+
+       _edata = .;
+
+       __bss_start = .;
+       SBSS(8)
+       BSS(8)
+       .far :
+       {
+               . = ALIGN(8);
+               *(.dynfar)
+               *(.far .far.* .gnu.linkonce.b.*)
+               . = ALIGN(8);
+       }
+       __bss_stop = .;
+
+       _end = .;
+
+       DWARF_DEBUG
+
+       /DISCARD/ :
+       {
+                 EXIT_TEXT
+                 EXIT_DATA
+                 EXIT_CALL
+                 *(.discard)
+                 *(.discard.*)
+                 *(.interp)
+       }
+}
diff --git a/arch/c6x/lib/Makefile b/arch/c6x/lib/Makefile
new file mode 100644 (file)
index 0000000..ffd3c65
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for arch/c6x/lib/
+#
+
+lib-y := divu.o divi.o pop_rts.o push_rts.o remi.o remu.o strasgi.o llshru.o
+lib-y += llshr.o llshl.o negll.o mpyll.o divremi.o divremu.o
+lib-y += checksum.o csum_64plus.o memcpy_64plus.o strasgi_64plus.o
diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c
new file mode 100644 (file)
index 0000000..67cc93b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+                           __wsum sum, int *csum_err)
+{
+       int missing;
+
+       missing = __copy_from_user(dst, src, len);
+       if (missing) {
+               memset(dst + len - missing, 0, missing);
+               *csum_err = -EFAULT;
+       } else
+               *csum_err = 0;
+
+       return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/* These are from csum_64plus.S */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(ip_compute_csum);
+EXPORT_SYMBOL(ip_fast_csum);
diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S
new file mode 100644 (file)
index 0000000..6d25896
--- /dev/null
@@ -0,0 +1,419 @@
+;
+;  linux/arch/c6x/lib/csum_64plus.s
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+#include <linux/linkage.h>
+
+;
+;unsigned int csum_partial_copy(const char *src, char * dst,
+;                              int len, int sum)
+;
+; A4:  src
+; B4:  dst
+; A6:  len
+; B6:  sum
+; return csum in A4
+;
+
+       .text
+ENTRY(csum_partial_copy)
+       MVC     .S2     ILC,B30
+
+       MV      .D1X    B6,A31          ; given csum
+       ZERO    .D1     A9              ; csum (a side)
+||     ZERO    .D2     B9              ; csum (b side)
+||     SHRU    .S2X    A6,2,B5         ; len / 4
+
+       ;; Check alignment and size
+       AND     .S1     3,A4,A1
+||     AND     .S2     3,B4,B0
+       OR      .L2X    B0,A1,B0        ; non aligned condition
+||     MVC     .S2     B5,ILC
+||     MVK     .D2     1,B2
+||     MV      .D1X    B5,A1           ; words condition
+  [!A1]        B       .S1     L8
+   [B0] BNOP   .S1     L6,5
+
+       SPLOOP          1
+
+       ;; Main loop for aligned words
+       LDW     .D1T1   *A4++,A7
+       NOP     4
+       MV      .S2X    A7,B7
+||     EXTU    .S1     A7,0,16,A16
+       STW     .D2T2   B7,*B4++
+||     MPYU    .M2     B7,B2,B8
+||     ADD     .L1     A16,A9,A9
+       NOP
+       SPKERNEL        8,0
+||     ADD     .L2     B8,B9,B9
+
+       ZERO    .D1     A1
+||     ADD     .L1X    A9,B9,A9        ;  add csum from a and b sides
+
+L6:
+  [!A1]        BNOP    .S1     L8,5
+
+       ;; Main loop for non-aligned words
+       SPLOOP          2
+ ||    MVK     .L1     1,A2
+
+       LDNW    .D1T1   *A4++,A7
+       NOP             3
+
+       NOP
+       MV      .S2X    A7,B7
+ ||    EXTU    .S1     A7,0,16,A16
+ ||    MPYU    .M1     A7,A2,A8
+
+       ADD     .L1     A16,A9,A9
+       SPKERNEL        6,0
+ ||    STNW    .D2T2   B7,*B4++
+ ||    ADD     .L1     A8,A9,A9
+
+L8:    AND     .S2X    2,A6,B5
+       CMPGT   .L2     B5,0,B0
+  [!B0]        BNOP    .S1     L82,4
+
+       ;; Manage half-word
+       ZERO    .L1     A7
+||     ZERO    .D1     A8
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+       LDBU    .D1T1   *A4++,A7
+       LDBU    .D1T1   *A4++,A8
+       NOP             3
+       SHL     .S1     A7,8,A0
+       ADD     .S1     A8,A9,A9
+       STB     .D2T1   A7,*B4++
+||     ADD     .S1     A0,A9,A9
+       STB     .D2T1   A8,*B4++
+
+#else
+
+       LDBU    .D1T1   *A4++,A7
+       LDBU    .D1T1   *A4++,A8
+       NOP             3
+       ADD     .S1     A7,A9,A9
+       SHL     .S1     A8,8,A0
+
+       STB     .D2T1   A7,*B4++
+||     ADD     .S1     A0,A9,A9
+       STB     .D2T1   A8,*B4++
+
+#endif
+
+       ;; Manage eventually the last byte
+L82:   AND     .S2X    1,A6,B0
+  [!B0]        BNOP    .S1     L9,5
+
+||     ZERO    .L1     A7
+
+L83:   LDBU    .D1T1   *A4++,A7
+       NOP             4
+
+       MV      .L2X    A7,B7
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+       STB     .D2T2   B7,*B4++
+||     SHL     .S1     A7,8,A7
+       ADD     .S1     A7,A9,A9
+
+#else
+
+       STB     .D2T2   B7,*B4++
+||     ADD     .S1     A7,A9,A9
+
+#endif
+
+       ;; Fold the csum
+L9:    SHRU    .S2X    A9,16,B0
+  [!B0]        BNOP    .S1     L10,5
+
+L91:   SHRU    .S2X    A9,16,B4
+||     EXTU    .S1     A9,16,16,A3
+       ADD     .D1X    A3,B4,A9
+
+       SHRU    .S1     A9,16,A0
+   [A0]        BNOP    .S1     L91,5
+
+L10:   ADD     .D1     A31,A9,A9
+       MV      .D1     A9,A4
+
+       BNOP    .S2     B3,4
+       MVC     .S2     B30,ILC
+ENDPROC(csum_partial_copy)
+
+;
+;unsigned short
+;ip_fast_csum(unsigned char *iph, unsigned int ihl)
+;{
+;      unsigned int checksum = 0;
+;      unsigned short *tosum = (unsigned short *) iph;
+;      int len;
+;
+;      len = ihl*4;
+;
+;      if (len <= 0)
+;              return 0;
+;
+;      while(len) {
+;              len -= 2;
+;              checksum += *tosum++;
+;      }
+;      if (len & 1)
+;              checksum += *(unsigned char*) tosum;
+;
+;      while(checksum >> 16)
+;              checksum = (checksum & 0xffff) + (checksum >> 16);
+;
+;      return ~checksum;
+;}
+;
+; A4:  iph
+; B4:  ihl
+; return checksum in A4
+;
+       .text
+
+ENTRY(ip_fast_csum)
+       ZERO    .D1     A5
+ ||    MVC     .S2     ILC,B30
+       SHL     .S2     B4,2,B0
+       CMPGT   .L2     B0,0,B1
+  [!B1] BNOP   .S1     L15,4
+  [!B1]        ZERO    .D1     A3
+
+  [!B0]        B       .S1     L12
+       SHRU    .S2     B0,1,B0
+       MVC     .S2     B0,ILC
+       NOP     3
+
+       SPLOOP  1
+       LDHU    .D1T1   *A4++,A3
+       NOP     3
+       NOP
+       SPKERNEL        5,0
+ ||    ADD     .L1     A3,A5,A5
+
+L12:   SHRU    .S1     A5,16,A0
+  [!A0]        BNOP    .S1     L14,5
+
+L13:   SHRU    .S2X    A5,16,B4
+       EXTU    .S1     A5,16,16,A3
+       ADD     .D1X    A3,B4,A5
+       SHRU    .S1     A5,16,A0
+  [A0] BNOP    .S1     L13,5
+
+L14:   NOT     .D1     A5,A3
+       EXTU    .S1     A3,16,16,A3
+
+L15:   BNOP    .S2     B3,3
+       MVC     .S2     B30,ILC
+       MV      .D1     A3,A4
+ENDPROC(ip_fast_csum)
+
+;
+;unsigned short
+;do_csum(unsigned char *buff, unsigned int len)
+;{
+;      int odd, count;
+;      unsigned int result = 0;
+;
+;      if (len <= 0)
+;              goto out;
+;      odd = 1 & (unsigned long) buff;
+;      if (odd) {
+;#ifdef __LITTLE_ENDIAN
+;              result += (*buff << 8);
+;#else
+;              result = *buff;
+;#endif
+;              len--;
+;              buff++;
+;      }
+;      count = len >> 1;               /* nr of 16-bit words.. */
+;      if (count) {
+;              if (2 & (unsigned long) buff) {
+;                      result += *(unsigned short *) buff;
+;                      count--;
+;                      len -= 2;
+;                      buff += 2;
+;              }
+;              count >>= 1;            /* nr of 32-bit words.. */
+;              if (count) {
+;                      unsigned int carry = 0;
+;                      do {
+;                              unsigned int w = *(unsigned int *) buff;
+;                              count--;
+;                              buff += 4;
+;                              result += carry;
+;                              result += w;
+;                              carry = (w > result);
+;                      } while (count);
+;                      result += carry;
+;                      result = (result & 0xffff) + (result >> 16);
+;              }
+;              if (len & 2) {
+;                      result += *(unsigned short *) buff;
+;                      buff += 2;
+;              }
+;      }
+;      if (len & 1)
+;#ifdef __LITTLE_ENDIAN
+;              result += *buff;
+;#else
+;              result += (*buff << 8);
+;#endif
+;      result = (result & 0xffff) + (result >> 16);
+;      /* add up carry.. */
+;      result = (result & 0xffff) + (result >> 16);
+;      if (odd)
+;              result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+;out:
+;      return result;
+;}
+;
+; A4:  buff
+; B4:  len
+; return checksum in A4
+;
+
+ENTRY(do_csum)
+          CMPGT   .L2     B4,0,B0
+   [!B0]   BNOP    .S1    L26,3
+          EXTU    .S1     A4,31,31,A0
+
+          MV      .L1     A0,A3
+||        MV      .S1X    B3,A5
+||        MV      .L2     B4,B3
+||        ZERO    .D1     A1
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+   [A0]    SUB    .L2     B3,1,B3
+|| [A0]    LDBU    .D1T1   *A4++,A1
+#else
+   [!A0]   BNOP    .S1    L21,5
+|| [A0]    LDBU    .D1T1   *A4++,A0
+          SUB     .L2     B3,1,B3
+||        SHL     .S1     A0,8,A1
+L21:
+#endif
+          SHR     .S2     B3,1,B0
+   [!B0]   BNOP    .S1    L24,3
+          MVK     .L1     2,A0
+          AND     .L1     A4,A0,A0
+
+   [!A0]   BNOP    .S1    L22,5
+|| [A0]    LDHU    .D1T1   *A4++,A0
+          SUB     .L2     B0,1,B0
+||        SUB     .S2     B3,2,B3
+||        ADD     .L1     A0,A1,A1
+L22:
+          SHR     .S2     B0,1,B0
+||        ZERO    .L1     A0
+
+   [!B0]   BNOP    .S1    L23,5
+|| [B0]    MVC    .S2     B0,ILC
+
+          SPLOOP  3
+          SPMASK  L1
+||        MV      .L1     A1,A2
+||        LDW     .D1T1   *A4++,A1
+
+          NOP     4
+          ADD     .L1     A0,A1,A0
+          ADD     .L1     A2,A0,A2
+
+          SPKERNEL 1,2
+||        CMPGTU  .L1     A1,A2,A0
+
+          ADD     .L1     A0,A2,A6
+          EXTU    .S1     A6,16,16,A7
+          SHRU    .S2X    A6,16,B0
+          NOP             1
+          ADD     .L1X    A7,B0,A1
+L23:
+          MVK     .L2     2,B0
+          AND     .L2     B3,B0,B0
+   [B0]    LDHU    .D1T1   *A4++,A0
+          NOP     4
+   [B0]    ADD    .L1     A0,A1,A1
+L24:
+          EXTU    .S2     B3,31,31,B0
+#ifdef CONFIG_CPU_BIG_ENDIAN
+   [!B0]   BNOP    .S1    L25,4
+|| [B0]    LDBU    .D1T1   *A4,A0
+          SHL     .S1     A0,8,A0
+          ADD     .L1     A0,A1,A1
+L25:
+#else
+   [B0]    LDBU    .D1T1   *A4,A0
+          NOP     4
+   [B0]    ADD    .L1     A0,A1,A1
+#endif
+          EXTU    .S1     A1,16,16,A0
+          SHRU    .S2X    A1,16,B0
+          NOP     1
+          ADD     .L1X    A0,B0,A0
+          SHRU    .S1     A0,16,A1
+          ADD     .L1     A0,A1,A0
+          EXTU    .S1     A0,16,16,A1
+          EXTU    .S1     A1,16,24,A2
+
+          EXTU    .S1     A1,24,16,A0
+||        MV      .L2X    A3,B0
+
+   [B0]    OR     .L1     A0,A2,A1
+L26:
+          NOP     1
+          BNOP    .S2X    A5,4
+          MV      .L1     A1,A4
+ENDPROC(do_csum)
+
+;__wsum csum_partial(const void *buff, int len, __wsum wsum)
+;{
+;      unsigned int sum = (__force unsigned int)wsum;
+;      unsigned int result = do_csum(buff, len);
+;
+;      /* add in old sum, and carry.. */
+;      result += sum;
+;      if (sum > result)
+;              result += 1;
+;      return (__force __wsum)result;
+;}
+;
+ENTRY(csum_partial)
+          MV      .L1X    B3,A9
+||        CALLP   .S2     do_csum,B3
+||        MV      .S1     A6,A8
+          BNOP    .S2X    A9,2
+          ADD     .L1     A8,A4,A1
+          CMPGTU  .L1     A8,A1,A0
+          ADD     .L1     A1,A0,A4
+ENDPROC(csum_partial)
+
+;unsigned short
+;ip_compute_csum(unsigned char *buff, unsigned int len)
+;
+; A4:  buff
+; B4:  len
+; return checksum in A4
+
+ENTRY(ip_compute_csum)
+          MV      .L1X    B3,A9
+||        CALLP   .S2     do_csum,B3
+          BNOP    .S2X    A9,3
+          NOT     .S1     A4,A4
+          CLR     .S1     A4,16,31,A4
+ENDPROC(ip_compute_csum)
diff --git a/arch/c6x/lib/divi.S b/arch/c6x/lib/divi.S
new file mode 100644 (file)
index 0000000..4bde924
--- /dev/null
@@ -0,0 +1,53 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       ;; ABI considerations for the divide functions
+       ;; The following registers are call-used:
+       ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+       ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+       ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+       ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+       ;;
+       ;; In our implementation, divu and remu are leaf functions,
+       ;; while both divi and remi call into divu.
+       ;; A0 is not clobbered by any of the functions.
+       ;; divu does not clobber B2 either, which is taken advantage of
+       ;; in remi.
+       ;; divi uses B5 to hold the original return address during
+       ;; the call to divu.
+       ;; remi uses B2 and A5 to hold the input values during the
+       ;; call to divu.  It stores B3 in on the stack.
+
+       .text
+ENTRY(__c6xabi_divi)
+       call    .s2     __c6xabi_divu
+||     mv      .d2     B3, B5
+||     cmpgt   .l1     0, A4, A1
+||     cmpgt   .l2     0, B4, B1
+
+   [A1]        neg     .l1     A4, A4
+|| [B1]        neg     .l2     B4, B4
+||     xor     .s1x    A1, B1, A1
+   [A1] addkpc .s2     _divu_ret, B3, 4
+_divu_ret:
+       neg     .l1     A4, A4
+||     mv      .l2     B3,B5
+||     ret     .s2     B5
+       nop             5
+ENDPROC(__c6xabi_divi)
diff --git a/arch/c6x/lib/divremi.S b/arch/c6x/lib/divremi.S
new file mode 100644 (file)
index 0000000..64bc5aa
--- /dev/null
@@ -0,0 +1,46 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_divremi)
+       stw     .d2t2   B3, *B15--[2]
+||     cmpgt   .l1     0, A4, A1
+||     cmpgt   .l2     0, B4, B2
+||     mv      .s1     A4, A5
+||     call    .s2     __c6xabi_divu
+
+   [A1]        neg     .l1     A4, A4
+|| [B2]        neg     .l2     B4, B4
+||     xor     .s2x    B2, A1, B0
+||     mv      .d2     B4, B2
+
+   [B0]        addkpc  .s2     _divu_ret_1, B3, 1
+  [!B0] addkpc .s2     _divu_ret_2, B3, 1
+       nop     2
+_divu_ret_1:
+       neg     .l1     A4, A4
+_divu_ret_2:
+       ldw     .d2t2   *++B15[2], B3
+
+       mpy32   .m1x    A4, B2, A6
+       nop             3
+       ret     .s2     B3
+       sub     .l1     A5, A6, A5
+       nop     4
+ENDPROC(__c6xabi_divremi)
diff --git a/arch/c6x/lib/divremu.S b/arch/c6x/lib/divremu.S
new file mode 100644 (file)
index 0000000..caa9f23
--- /dev/null
@@ -0,0 +1,87 @@
+;;  Copyright 2011  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_divremu)
+       ;; We use a series of up to 31 subc instructions.  First, we find
+       ;; out how many leading zero bits there are in the divisor.  This
+       ;; gives us both a shift count for aligning (shifting) the divisor
+       ;; to the dividend, and the number of times we have to execute subc.
+
+       ;; At the end, we have both the remainder and most of the quotient
+       ;; in A4.  The top bit of the quotient is computed first and is
+       ;; placed in A2.
+
+       ;; Return immediately if the dividend is zero.  Setting B4 to 1
+       ;; is a trick to allow us to leave the following insns in the jump
+       ;; delay slot without affecting the result.
+       mv      .s2x    A4, B1
+
+  [b1] lmbd    .l2     1, B4, B1
+||[!b1] b      .s2     B3      ; RETURN A
+||[!b1] mvk    .d2     1, B4
+
+||[!b1] zero   .s1     A5
+       mv      .l1x    B1, A6
+||     shl     .s2     B4, B1, B4
+
+       ;; The loop performs a maximum of 28 steps, so we do the
+       ;; first 3 here.
+       cmpltu  .l1x    A4, B4, A2
+  [!A2]        sub     .l1x    A4, B4, A4
+||     shru    .s2     B4, 1, B4
+||     xor     .s1     1, A2, A2
+
+       shl     .s1     A2, 31, A2
+|| [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+
+       ;; RETURN A may happen here (note: must happen before the next branch)
+__divremu0:
+       cmpgt   .l2     B1, 7, B0
+|| [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+|| [b0] b      .s1     __divremu0
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+   [b1]        subc    .l1x    A4,B4,A4
+|| [b1]        add     .s2     -1, B1, B1
+       ;; loop backwards branch happens here
+
+	;; Epilogue (runs in the return delay slots): split A4 into the
+	;; remainder (extu into A5) and the quotient bits, then merge in
+	;; the quotient top bit saved in A2.  NOTE(review): register roles
+	;; inferred from the extu/shl/or sequence — confirm against the
+	;; C6x ABI divremu contract (quotient in A4, remainder in A5).
+       ret     .s2     B3
+||     mvk     .s1     32, A1
+       sub     .l1     A1, A6, A6
+||     extu    .s1     A4, A6, A5
+       shl     .s1     A4, A6, A4
+       shru    .s1     A4, 1, A4
+||     sub     .l1     A6, 1, A6
+       or      .l1     A2, A4, A4
+       shru    .s1     A4, A6, A4
+       nop
+ENDPROC(__c6xabi_divremu)
diff --git a/arch/c6x/lib/divu.S b/arch/c6x/lib/divu.S
new file mode 100644 (file)
index 0000000..64af3c0
--- /dev/null
@@ -0,0 +1,98 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       ;; ABI considerations for the divide functions
+       ;; The following registers are call-used:
+       ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+       ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+       ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+       ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+       ;;
+       ;; In our implementation, divu and remu are leaf functions,
+       ;; while both divi and remi call into divu.
+       ;; A0 is not clobbered by any of the functions.
+       ;; divu does not clobber B2 either, which is taken advantage of
+       ;; in remi.
+       ;; divi uses B5 to hold the original return address during
+       ;; the call to divu.
+       ;; remi uses B2 and A5 to hold the input values during the
+       ;; call to divu.  It stores B3 on the stack.
+
+       .text
+ENTRY(__c6xabi_divu)
+       ;; We use a series of up to 31 subc instructions.  First, we find
+       ;; out how many leading zero bits there are in the divisor.  This
+       ;; gives us both a shift count for aligning (shifting) the divisor
+       ;; to the dividend, and the number of times we have to execute subc.
+
+       ;; At the end, we have both the remainder and most of the quotient
+       ;; in A4.  The top bit of the quotient is computed first and is
+       ;; placed in A2.
+
+       ;; Return immediately if the dividend is zero.
+        mv     .s2x    A4, B1
+   [B1]         lmbd   .l2     1, B4, B1
+|| [!B1] b     .s2     B3      ; RETURN A
+|| [!B1] mvk   .d2     1, B4
+        mv     .l1x    B1, A6
+||      shl    .s2     B4, B1, B4
+
+       ;; The loop performs a maximum of 28 steps, so we do the
+       ;; first 3 here.
+        cmpltu .l1x    A4, B4, A2
+   [!A2] sub   .l1x    A4, B4, A4
+||      shru   .s2     B4, 1, B4
+||      xor    .s1     1, A2, A2
+
+        shl    .s1     A2, 31, A2
+|| [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+
+       ;; RETURN A may happen here (note: must happen before the next branch)
+_divu_loop:
+        cmpgt  .l2     B1, 7, B0
+|| [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+|| [B0]  b     .s1     _divu_loop
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+       ;; loop backwards branch happens here
+
+	;; Epilogue (runs in the return delay slots): shift the quotient
+	;; bits back into place and merge in the top bit saved in A2.
+        ret    .s2     B3
+||      mvk    .s1     32, A1
+        sub    .l1     A1, A6, A6
+        shl    .s1     A4, A6, A4
+        shru   .s1     A4, 1, A4
+||      sub    .l1     A6, 1, A6
+        or     .l1     A2, A4, A4
+        shru   .s1     A4, A6, A4
+        nop
+ENDPROC(__c6xabi_divu)
diff --git a/arch/c6x/lib/llshl.S b/arch/c6x/lib/llshl.S
new file mode 100644 (file)
index 0000000..7b105e2
--- /dev/null
@@ -0,0 +1,37 @@
+;;  Copyright (C) 2010 Texas Instruments Incorporated
+;;  Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;;  uint64_t __c6xabi_llshl(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_llshl)
+	;; Logical left shift of the 64-bit value in A5:A4 (high:low) by
+	;; B4 bits.  A0 = 32 - shift; A2 is set when shift < 32.  For
+	;; shifts >= 32 the low word is moved up by (shift - 32) and the
+	;; low result word A4 is cleared.
+        mv     .l1x    B4,A1
+   [!A1] b     .s2     B3              ; just return if zero shift
+        mvk    .s1     32,A0
+        sub    .d1     A0,A1,A0
+        cmplt  .l1     0,A0,A2
+   [A2]         shru   .s1     A4,A0,A0
+   [!A2] neg   .l1     A0,A5
+|| [A2]  shl   .s1     A5,A1,A5
+   [!A2] shl   .s1     A4,A5,A5
+|| [A2]  or    .d1     A5,A0,A5
+|| [!A2] mvk   .l1     0,A4
+   [A2]         shl    .s1     A4,A1,A4
+        bnop   .s2     B3,5
+ENDPROC(__c6xabi_llshl)
diff --git a/arch/c6x/lib/llshr.S b/arch/c6x/lib/llshr.S
new file mode 100644 (file)
index 0000000..fde1bec
--- /dev/null
@@ -0,0 +1,38 @@
+;;  Copyright (C) 2010 Texas Instruments Incorporated
+;;  Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;;  uint64_t __c6xabi_llshr(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_llshr)
+	;; Arithmetic right shift of the 64-bit value in A5:A4 (high:low)
+	;; by B4 bits.  A2 selects the shift < 32 path; for shifts >= 32
+	;; the high word moves down by (shift - 32) and A5 is refilled
+	;; with sign bits (shr by 0x1f).
+        mv     .l1x    B4,A1
+   [!A1] b     .s2     B3              ; return if zero shift count
+        mvk    .s1     32,A0
+        sub    .d1     A0,A1,A0
+        cmplt  .l1     0,A0,A2
+   [A2]  shl   .s1     A5,A0,A0
+        nop
+   [!A2] neg   .l1     A0,A4
+|| [A2]  shru  .s1     A4,A1,A4
+   [!A2] shr   .s1     A5,A4,A4
+|| [A2]  or    .d1     A4,A0,A4
+   [!A2] shr   .s1     A5,0x1f,A5
+   [A2]  shr   .s1     A5,A1,A5
+        bnop   .s2     B3,5
+ENDPROC(__c6xabi_llshr)
diff --git a/arch/c6x/lib/llshru.S b/arch/c6x/lib/llshru.S
new file mode 100644 (file)
index 0000000..596ae3f
--- /dev/null
@@ -0,0 +1,38 @@
+;;  Copyright (C) 2010 Texas Instruments Incorporated
+;;  Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;;  uint64_t __c6xabi_llshru(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_llshru)
+	;; Logical right shift of the 64-bit value in A5:A4 (high:low) by
+	;; B4 bits.  A2 selects the shift < 32 path; for shifts >= 32 the
+	;; high word moves down by (shift - 32) and the high result word
+	;; A5 is cleared.
+        mv     .l1x    B4,A1
+   [!A1] b     .s2     B3              ; return if zero shift count
+        mvk    .s1     32,A0
+        sub    .d1     A0,A1,A0
+        cmplt  .l1     0,A0,A2
+   [A2]  shl   .s1     A5,A0,A0
+        nop
+   [!A2] neg   .l1     A0,A4
+|| [A2]  shru  .s1     A4,A1,A4
+   [!A2] shru  .s1     A5,A4,A4
+|| [A2]  or    .d1     A4,A0,A4
+|| [!A2] mvk   .l1     0,A5
+   [A2]  shru  .s1     A5,A1,A5
+        bnop   .s2     B3,5
+ENDPROC(__c6xabi_llshru)
diff --git a/arch/c6x/lib/memcpy_64plus.S b/arch/c6x/lib/memcpy_64plus.S
new file mode 100644 (file)
index 0000000..0bbc2cb
--- /dev/null
@@ -0,0 +1,46 @@
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2006, 2009, 2010 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+
+#include <linux/linkage.h>
+
+       .text
+
+ENTRY(memcpy)
+	; A4 = dst, B4 = src, A6 = length in bytes (C6x argument regs).
+	; The low bits of the length predicate optional 1-, 2- and 4-byte
+	; head copies; B1 = length/8 counts the SPLOOP iterations moving
+	; 8 bytes each.  ILC is saved in B2 and restored before returning.
+       AND     .L1     0x1,A6,A0
+ ||    AND     .S1     0x2,A6,A1
+ ||    AND     .L2X    0x4,A6,B0
+ ||    MV      .D1     A4,A3
+ ||    MVC     .S2     ILC,B2
+
+   [A0] LDB    .D2T1   *B4++,A5
+   [A1] LDB    .D2T1   *B4++,A7
+   [A1] LDB    .D2T1   *B4++,A8
+   [B0] LDNW   .D2T1   *B4++,A9
+ ||    SHRU    .S2X    A6,0x3,B1
+  [!B1] BNOP   .S2     B3,1
+
+   [A0] STB    .D1T1   A5,*A3++
+ ||[B1] MVC    .S2     B1,ILC
+   [A1] STB    .D1T1   A7,*A3++
+   [A1] STB    .D1T1   A8,*A3++
+   [B0] STNW   .D1T1   A9,*A3++        ; return when len < 8
+
+	; Software-pipelined copy of the remaining doublewords.
+       SPLOOP  2
+
+       LDNDW   .D2T1   *B4++,A9:A8
+       NOP     3
+
+       NOP
+       SPKERNEL        0,0
+ ||    STNDW   .D1T1   A9:A8,*A3++
+
+       BNOP    .S2     B3,4
+       MVC     .S2     B2,ILC
+ENDPROC(memcpy)
diff --git a/arch/c6x/lib/mpyll.S b/arch/c6x/lib/mpyll.S
new file mode 100644 (file)
index 0000000..f103441
--- /dev/null
@@ -0,0 +1,49 @@
+;;  Copyright (C) 2010 Texas Instruments Incorporated
+;;  Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       ;; uint64_t __c6xabi_mpyll(uint64_t x, uint64_t y)
+       ;;
+       ;; 64x64 multiply
+       ;; First compute partial results using 32-bit parts of x and y:
+       ;;
+       ;;   b63         b32 b31          b0
+       ;;    -----------------------------
+       ;;    |      1      |      0      |
+       ;;    -----------------------------
+       ;;
+       ;;   P0 = X0*Y0
+       ;;   P1 = X0*Y1 + X1*Y0
+       ;;   P2 = X1*Y1
+       ;;
+       ;;   result = (P2 << 64) + (P1 << 32) + P0
+       ;;
+       ;; Since the result is also 64-bit, we can skip the P2 term.
+
+       .text
+ENTRY(__c6xabi_mpyll)
+	;; Compute the three partial products in parallel with the return
+	;; branch; the sums below run in its delay slots.
+       mpy32u  .m1x    A4,B4,A1:A0     ; X0*Y0
+       b       .s2     B3
+ ||    mpy32u  .m2x    B5,A4,B1:B0     ; X0*Y1 (don't need upper 32-bits)
+ ||    mpy32u  .m1x    A5,B4,A3:A2     ; X1*Y0 (don't need upper 32-bits)
+       nop
+       nop
+	;; Result low word = low(X0*Y0); high word = high(X0*Y0) plus the
+	;; low words of the two cross products.
+       mv      .s1     A0,A4
+       add     .l1x    A2,B0,A5
+       add     .s1     A1,A5,A5
+ENDPROC(__c6xabi_mpyll)
diff --git a/arch/c6x/lib/negll.S b/arch/c6x/lib/negll.S
new file mode 100644 (file)
index 0000000..82f4bce
--- /dev/null
@@ -0,0 +1,31 @@
+;;  Copyright (C) 2010 Texas Instruments Incorporated
+;;  Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;;  int64_t __c6xabi_negll(int64_t val)
+
+#include <linux/linkage.h>
+
+       .text
+ENTRY(__c6xabi_negll)
+	;; Negate the 64-bit value in A5:A4 (high:low); all of the work
+	;; runs in the five delay slots of the return branch.
+       b       .s2     B3
+       mvk     .l1     0,A0
+	;; 40-bit subtract 0 - A4: low word in A2, borrow bits in A3.
+       subu    .l1     A0,A4,A3:A2
+       sub     .l1     A0,A5,A0
+||     ext     .s1     A3,24,24,A5     ; sign-extend the 8-bit borrow
+       add     .l1     A5,A0,A5
+       mv      .s1     A2,A4
+ENDPROC(__c6xabi_negll)
diff --git a/arch/c6x/lib/pop_rts.S b/arch/c6x/lib/pop_rts.S
new file mode 100644 (file)
index 0000000..d7d96c7
--- /dev/null
@@ -0,0 +1,32 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+
+ENTRY(__c6xabi_pop_rts)
+	;; Epilogue helper: restore the registers saved by
+	;; __c6xabi_push_rts (in reverse order) and return through the
+	;; reloaded B3; the trailing loads execute in the branch delay
+	;; slots.
+       lddw    .d2t2   *++B15, B3:B2
+       lddw    .d2t1   *++B15, A11:A10
+       lddw    .d2t2   *++B15, B11:B10
+       lddw    .d2t1   *++B15, A13:A12
+       lddw    .d2t2   *++B15, B13:B12
+       lddw    .d2t1   *++B15, A15:A14
+||     b       .s2     B3
+       ldw     .d2t2   *++B15[2], B14
+       nop     4
+ENDPROC(__c6xabi_pop_rts)
diff --git a/arch/c6x/lib/push_rts.S b/arch/c6x/lib/push_rts.S
new file mode 100644 (file)
index 0000000..f6e3db3
--- /dev/null
@@ -0,0 +1,31 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+
+ENTRY(__c6xabi_push_rts)
+	;; Prologue helper: push B14 and the callee-saved register pairs
+	;; onto the stack while branching to the continuation address in
+	;; A3 — the remaining stores fill the branch delay slots.
+       stw     .d2t2   B14, *B15--[2]
+       stdw    .d2t1   A15:A14, *B15--
+||     b       .s2x    A3
+       stdw    .d2t2   B13:B12, *B15--
+       stdw    .d2t1   A13:A12, *B15--
+       stdw    .d2t2   B11:B10, *B15--
+       stdw    .d2t1   A11:A10, *B15--
+       stdw    .d2t2   B3:B2, *B15--
+ENDPROC(__c6xabi_push_rts)
diff --git a/arch/c6x/lib/remi.S b/arch/c6x/lib/remi.S
new file mode 100644 (file)
index 0000000..6f2ca18
--- /dev/null
@@ -0,0 +1,64 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       ;; ABI considerations for the divide functions
+       ;; The following registers are call-used:
+       ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+       ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+       ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+       ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+       ;;
+       ;; In our implementation, divu and remu are leaf functions,
+       ;; while both divi and remi call into divu.
+       ;; A0 is not clobbered by any of the functions.
+       ;; divu does not clobber B2 either, which is taken advantage of
+       ;; in remi.
+       ;; divi uses B5 to hold the original return address during
+       ;; the call to divu.
+       ;; remi uses B2 and A5 to hold the input values during the
+       ;; call to divu.  It stores B3 on the stack.
+
+       .text
+
+ENTRY(__c6xabi_remi)
+	;; Signed remainder of A4 / B4, via the unsigned divide helper:
+	;; save the return address, record the operand signs (A1, B2),
+	;; keep the original dividend in A5 and call __c6xabi_divu.
+       stw     .d2t2   B3, *B15--[2]
+||     cmpgt   .l1     0, A4, A1
+||     cmpgt   .l2     0, B4, B2
+||     mv      .s1     A4, A5
+||     call    .s2     __c6xabi_divu
+
+	;; Take absolute values for divu.  B0 = sign(a) ^ sign(b) selects
+	;; the return path that negates the quotient; B2 keeps a copy of
+	;; the original divisor (parallel insns read pre-write values).
+   [A1]        neg     .l1     A4, A4
+|| [B2]        neg     .l2     B4, B4
+||     xor     .s2x    B2, A1, B0
+||     mv      .d2     B4, B2
+
+   [B0]        addkpc  .s2     _divu_ret_1, B3, 1
+  [!B0] addkpc .s2     _divu_ret_2, B3, 1
+       nop     2
+_divu_ret_1:
+       neg     .l1     A4, A4
+_divu_ret_2:
+       ldw     .d2t2   *++B15[2], B3
+
+	;; remainder = saved dividend - quotient * divisor, computed in
+	;; the return delay slots and returned in A4.
+       mpy32   .m1x    A4, B2, A6
+       nop             3
+       ret     .s2     B3
+       sub     .l1     A5, A6, A4
+       nop     4
+ENDPROC(__c6xabi_remi)
diff --git a/arch/c6x/lib/remu.S b/arch/c6x/lib/remu.S
new file mode 100644 (file)
index 0000000..3fae719
--- /dev/null
@@ -0,0 +1,82 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       ;; ABI considerations for the divide functions
+       ;; The following registers are call-used:
+       ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+       ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+       ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+       ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+       ;;
+       ;; In our implementation, divu and remu are leaf functions,
+       ;; while both divi and remi call into divu.
+       ;; A0 is not clobbered by any of the functions.
+       ;; divu does not clobber B2 either, which is taken advantage of
+       ;; in remi.
+       ;; divi uses B5 to hold the original return address during
+       ;; the call to divu.
+       ;; remi uses B2 and A5 to hold the input values during the
+       ;; call to divu.  It stores B3 on the stack.
+
+
+       .text
+
+ENTRY(__c6xabi_remu)
+       ;; The ABI seems designed to prevent these functions calling each other,
+       ;; so we duplicate most of the divsi3 code here.
+        mv     .s2x    A4, B1
+        lmbd   .l2     1, B4, B1
+|| [!B1] b     .s2     B3      ; RETURN A
+|| [!B1] mvk   .d2     1, B4
+
+        mv     .l1x    B1, A7
+||      shl    .s2     B4, B1, B4
+
+        cmpltu .l1x    A4, B4, A1
+   [!A1] sub   .l1x    A4, B4, A4
+        shru   .s2     B4, 1, B4
+
+_remu_loop:
+        cmpgt  .l2     B1, 7, B0
+|| [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+       ;; RETURN A may happen here (note: must happen before the next branch)
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+|| [B0]         b      .s1     _remu_loop
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+       ;; loop backwards branch happens here
+
+	;; The final two subc steps run in the return delay slots; extu
+	;; then recovers the remainder from the top bits of A4 (A7 holds
+	;; the lmbd count saved above).  NOTE(review): register roles
+	;; inferred from the code — confirm against divu's layout.
+        ret    .s2     B3
+   [B1]         subc   .l1x    A4,B4,A4
+|| [B1]         add    .s2     -1, B1, B1
+   [B1]         subc   .l1x    A4,B4,A4
+
+        extu   .s1     A4, A7, A4
+        nop    2
+ENDPROC(__c6xabi_remu)
diff --git a/arch/c6x/lib/strasgi.S b/arch/c6x/lib/strasgi.S
new file mode 100644 (file)
index 0000000..de27407
--- /dev/null
@@ -0,0 +1,89 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+
+ENTRY(__c6xabi_strasgi)
+       ;; This is essentially memcpy, with alignment known to be at least
+       ;; 4, and the size a multiple of 4 greater than or equal to 28.
+	;; A4 = dst, B4 = src, A6 = length.  The prologue loads six words
+	;; ahead of the stores; inside the loop each slot stores one word
+	;; and conditionally reloads, with B6/B7 counting down the bytes
+	;; remaining and B0 predicating further loads.
+        ldw    .d2t1   *B4++, A0
+||      mvk    .s2     16, B1
+        ldw    .d2t1   *B4++, A1
+||      mvk    .s2     20, B2
+||      sub    .d1     A6, 24, A6
+        ldw    .d2t1   *B4++, A5
+        ldw    .d2t1   *B4++, A7
+||      mv     .l2x    A6, B7
+        ldw    .d2t1   *B4++, A8
+        ldw    .d2t1   *B4++, A9
+||      mv     .s2x    A0, B5
+||      cmpltu .l2     B2, B7, B0
+
+_strasgi_loop:
+        stw    .d1t2   B5, *A4++
+|| [B0]         ldw    .d2t1   *B4++, A0
+||      mv     .s2x    A1, B5
+||      mv     .l2     B7, B6
+
+   [B0]         sub    .d2     B6, 24, B7
+|| [B0]         b      .s2     _strasgi_loop
+||      cmpltu .l2     B1, B6, B0
+
+   [B0]         ldw    .d2t1   *B4++, A1
+||      stw    .d1t2   B5, *A4++
+||      mv     .s2x    A5, B5
+||      cmpltu .l2     12, B6, B0
+
+   [B0]         ldw    .d2t1   *B4++, A5
+||      stw    .d1t2   B5, *A4++
+||      mv     .s2x    A7, B5
+||      cmpltu .l2     8, B6, B0
+
+   [B0]         ldw    .d2t1   *B4++, A7
+||      stw    .d1t2   B5, *A4++
+||      mv     .s2x    A8, B5
+||      cmpltu .l2     4, B6, B0
+
+   [B0]         ldw    .d2t1   *B4++, A8
+||      stw    .d1t2   B5, *A4++
+||      mv     .s2x    A9, B5
+||      cmpltu .l2     0, B6, B0
+
+   [B0]         ldw    .d2t1   *B4++, A9
+||      stw    .d1t2   B5, *A4++
+||      mv     .s2x    A0, B5
+||      cmpltu .l2     B2, B7, B0
+
+       ;; loop back branch happens here
+
+	;; Drain: store whatever buffered words the final iteration still
+	;; owes, in the delay slots of the return.
+        cmpltu .l2     B1, B6, B0
+||      ret    .s2     b3
+
+   [B0]         stw    .d1t1   A1, *A4++
+||      cmpltu .l2     12, B6, B0
+   [B0]         stw    .d1t1   A5, *A4++
+||      cmpltu .l2     8, B6, B0
+   [B0]         stw    .d1t1   A7, *A4++
+||      cmpltu .l2     4, B6, B0
+   [B0]         stw    .d1t1   A8, *A4++
+||      cmpltu .l2     0, B6, B0
+   [B0]         stw    .d1t1   A9, *A4++
+
+       ;; return happens here
+ENDPROC(__c6xabi_strasgi)
diff --git a/arch/c6x/lib/strasgi_64plus.S b/arch/c6x/lib/strasgi_64plus.S
new file mode 100644 (file)
index 0000000..c9fd159
--- /dev/null
@@ -0,0 +1,39 @@
+;;  Copyright 2010  Free Software Foundation, Inc.
+;;  Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+       .text
+
+ENTRY(__c6xabi_strasgi_64plus)
+	;; Word-wise copy of a6 bytes from b4 to a4 using a single-cycle
+	;; software-pipelined loop; the trip count (a6/4, adjusted by -4
+	;; for the sploopd setup — NOTE(review): confirm the bias against
+	;; the SPLOOPD/ILC definition) is placed in ILC.
+       shru    .s2x    a6, 2, b31
+||     mv      .s1     a4, a30
+||     mv      .d2     b4, b30
+
+       add     .s2     -4, b31, b31
+
+       sploopd         1
+||     mvc     .s2     b31, ilc
+       ldw     .d2t2   *b30++, b31
+       nop     4
+       mv      .s1x    b31,a31
+       spkernel        6, 0
+||     stw     .d1t1   a31, *a30++
+
+       ret     .s2     b3
+       nop 5
+ENDPROC(__c6xabi_strasgi_64plus)
diff --git a/arch/c6x/mm/Makefile b/arch/c6x/mm/Makefile
new file mode 100644 (file)
index 0000000..136a975
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux c6x-specific parts of the memory manager.
+#
+
+obj-y := init.o dma-coherent.o
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
new file mode 100644 (file)
index 0000000..4187e51
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  DMA uncached mapping support.
+ *
+ *  Using code pulled from ARM
+ *  Copyright (C) 2000-2004 Russell King
+ *
+ */
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+
+#include <asm/page.h>
+
+/*
+ * DMA coherent memory management, can be redefined using the memdma=
+ * kernel command line
+ */
+
+/* none by default */
+static phys_addr_t dma_base;
+static u32 dma_size;
+static u32 dma_pages;
+
+static unsigned long *dma_bitmap;
+
+/* bitmap lock */
+static DEFINE_SPINLOCK(dma_lock);
+
+/*
+ * Return a DMA coherent and contiguous memory chunk from the DMA memory
+ */
+static inline u32 __alloc_dma_pages(int order)
+{
+       unsigned long flags;
+       u32 pos;
+
+       spin_lock_irqsave(&dma_lock, flags);
+       pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
+       spin_unlock_irqrestore(&dma_lock, flags);
+
+       return dma_base + (pos << PAGE_SHIFT);
+}
+
+static void __free_dma_pages(u32 addr, int order)
+{
+       unsigned long flags;
+       u32 pos = (addr - dma_base) >> PAGE_SHIFT;
+
+       if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
+               printk(KERN_ERR "%s: freeing outside range.\n", __func__);
+               BUG();
+       }
+
+       spin_lock_irqsave(&dma_lock, flags);
+       bitmap_release_region(dma_bitmap, pos, order);
+       spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Allocate DMA coherent memory space and return both the kernel
+ * virtual and DMA address for that space.
+ */
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *handle, gfp_t gfp)
+{
+       u32 paddr;
+       int order;
+
+       if (!dma_size || !size)
+               return NULL;
+
+       order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+       paddr = __alloc_dma_pages(order);
+
+       if (handle)
+               *handle = paddr;
+
+       if (!paddr)
+               return NULL;
+
+       return phys_to_virt(paddr);
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * Free DMA coherent memory as defined by the above mapping.
+ */
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+                      dma_addr_t dma_handle)
+{
+       int order;
+
+       if (!dma_size || !size)
+               return;
+
+       order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+       __free_dma_pages(virt_to_phys(vaddr), order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+/*
+ * Initialise the coherent DMA memory allocator using the given uncached region.
+ */
+void __init coherent_mem_init(phys_addr_t start, u32 size)
+{
+       phys_addr_t bitmap_phys;
+
+       if (!size)
+               return;
+
+       printk(KERN_INFO
+              "Coherent memory (DMA) region start=0x%x size=0x%x\n",
+              start, size);
+
+       dma_base = start;
+       dma_size = size;
+
+       /* allocate bitmap */
+       dma_pages = dma_size >> PAGE_SHIFT;
+       if (dma_size & (PAGE_SIZE - 1))
+               ++dma_pages;
+
+       bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+                                    sizeof(long));
+
+       dma_bitmap = phys_to_virt(bitmap_phys);
+       memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
+}
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
new file mode 100644 (file)
index 0000000..89395f0
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blkdev.h>
+#endif
+#include <linux/initrd.h>
+
+#include <asm/sections.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses  of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+       struct pglist_data *pgdat = NODE_DATA(0);
+       unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+       empty_zero_page      = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+       memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+       /*
+        * Set up user data space
+        */
+       set_fs(KERNEL_DS);
+
+       /*
+        * Define zones
+        */
+       zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
+       pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
+               __pa(PAGE_OFFSET) >> PAGE_SHIFT;
+
+       free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+       int codek, datak;
+       unsigned long tmp;
+       unsigned long len = memory_end - memory_start;
+
+       high_memory = (void *)(memory_end & PAGE_MASK);
+
+       /* this will put all memory onto the freelists */
+       totalram_pages = free_all_bootmem();
+
+       codek = (_etext - _stext) >> 10;
+       datak = (_end - _sdata) >> 10;
+
+       tmp = nr_free_pages() << PAGE_SHIFT;
+       printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n",
+              tmp >> 10, len >> 10, codek, datak);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+       int pages = 0;
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               init_page_count(virt_to_page(start));
+               free_page(start);
+               totalram_pages++;
+               pages++;
+       }
+       printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
+              (pages * PAGE_SIZE) >> 10);
+}
+#endif
+
+void __init free_initmem(void)
+{
+       unsigned long addr;
+
+       /*
+        * The following code should be cool even if these sections
+        * are not page aligned.
+        */
+       addr = PAGE_ALIGN((unsigned long)(__init_begin));
+
+       /* next to check that the page we free is not a partial page */
+       for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
+            addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               init_page_count(virt_to_page(addr));
+               free_page(addr);
+               totalram_pages++;
+       }
+       printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
+              (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
+}
diff --git a/arch/c6x/platforms/Kconfig b/arch/c6x/platforms/Kconfig
new file mode 100644 (file)
index 0000000..401ee67
--- /dev/null
@@ -0,0 +1,16 @@
+
+config SOC_TMS320C6455
+       bool "TMS320C6455"
+       default n
+
+config SOC_TMS320C6457
+       bool "TMS320C6457"
+       default n
+
+config SOC_TMS320C6472
+       bool "TMS320C6472"
+       default n
+
+config SOC_TMS320C6474
+       bool "TMS320C6474"
+       default n
diff --git a/arch/c6x/platforms/Makefile b/arch/c6x/platforms/Makefile
new file mode 100644 (file)
index 0000000..9a95b9b
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for arch/c6x/platforms
+#
+# Copyright 2010, 2011 Texas Instruments Incorporated
+#
+
+obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o
+obj-y += dscr.o
+
+# SoC objects
+obj-$(CONFIG_SOC_TMS320C6455)   += emif.o
+obj-$(CONFIG_SOC_TMS320C6457)   += emif.o
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
new file mode 100644 (file)
index 0000000..86318a1
--- /dev/null
@@ -0,0 +1,445 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <asm/cache.h>
+#include <asm/soc.h>
+
+/*
+ * Internal Memory Control Registers for caches
+ */
+#define IMCR_CCFG        0x0000
+#define IMCR_L1PCFG      0x0020
+#define IMCR_L1PCC       0x0024
+#define IMCR_L1DCFG      0x0040
+#define IMCR_L1DCC       0x0044
+#define IMCR_L2ALLOC0    0x2000
+#define IMCR_L2ALLOC1    0x2004
+#define IMCR_L2ALLOC2    0x2008
+#define IMCR_L2ALLOC3    0x200c
+#define IMCR_L2WBAR      0x4000
+#define IMCR_L2WWC       0x4004
+#define IMCR_L2WIBAR     0x4010
+#define IMCR_L2WIWC      0x4014
+#define IMCR_L2IBAR      0x4018
+#define IMCR_L2IWC       0x401c
+#define IMCR_L1PIBAR     0x4020
+#define IMCR_L1PIWC      0x4024
+#define IMCR_L1DWIBAR    0x4030
+#define IMCR_L1DWIWC     0x4034
+#define IMCR_L1DWBAR     0x4040
+#define IMCR_L1DWWC      0x4044
+#define IMCR_L1DIBAR     0x4048
+#define IMCR_L1DIWC      0x404c
+#define IMCR_L2WB        0x5000
+#define IMCR_L2WBINV     0x5004
+#define IMCR_L2INV       0x5008
+#define IMCR_L1PINV      0x5028
+#define IMCR_L1DWB       0x5040
+#define IMCR_L1DWBINV    0x5044
+#define IMCR_L1DINV      0x5048
+#define IMCR_MAR_BASE    0x8000
+#define IMCR_MAR96_111   0x8180
+#define IMCR_MAR128_191   0x8200
+#define IMCR_MAR224_239   0x8380
+#define IMCR_L2MPFAR     0xa000
+#define IMCR_L2MPFSR     0xa004
+#define IMCR_L2MPFCR     0xa008
+#define IMCR_L2MPLK0     0xa100
+#define IMCR_L2MPLK1     0xa104
+#define IMCR_L2MPLK2     0xa108
+#define IMCR_L2MPLK3     0xa10c
+#define IMCR_L2MPLKCMD   0xa110
+#define IMCR_L2MPLKSTAT   0xa114
+#define IMCR_L2MPPA_BASE  0xa200
+#define IMCR_L1PMPFAR    0xa400
+#define IMCR_L1PMPFSR    0xa404
+#define IMCR_L1PMPFCR    0xa408
+#define IMCR_L1PMPLK0    0xa500
+#define IMCR_L1PMPLK1    0xa504
+#define IMCR_L1PMPLK2    0xa508
+#define IMCR_L1PMPLK3    0xa50c
+#define IMCR_L1PMPLKCMD   0xa510
+#define IMCR_L1PMPLKSTAT  0xa514
+#define IMCR_L1PMPPA_BASE 0xa600
+#define IMCR_L1DMPFAR    0xac00
+#define IMCR_L1DMPFSR    0xac04
+#define IMCR_L1DMPFCR    0xac08
+#define IMCR_L1DMPLK0    0xad00
+#define IMCR_L1DMPLK1    0xad04
+#define IMCR_L1DMPLK2    0xad08
+#define IMCR_L1DMPLK3    0xad0c
+#define IMCR_L1DMPLKCMD   0xad10
+#define IMCR_L1DMPLKSTAT  0xad14
+#define IMCR_L1DMPPA_BASE 0xae00
+#define IMCR_L2PDWAKE0   0xc040
+#define IMCR_L2PDWAKE1   0xc044
+#define IMCR_L2PDSLEEP0   0xc050
+#define IMCR_L2PDSLEEP1   0xc054
+#define IMCR_L2PDSTAT0   0xc060
+#define IMCR_L2PDSTAT1   0xc064
+
+/*
+ * CCFG register values and bits
+ */
+#define L2MODE_0K_CACHE   0x0
+#define L2MODE_32K_CACHE  0x1
+#define L2MODE_64K_CACHE  0x2
+#define L2MODE_128K_CACHE 0x3
+#define L2MODE_256K_CACHE 0x7
+
+#define L2PRIO_URGENT     0x0
+#define L2PRIO_HIGH       0x1
+#define L2PRIO_MEDIUM     0x2
+#define L2PRIO_LOW        0x3
+
+#define CCFG_ID           0x100   /* Invalidate L1D bit */
+#define CCFG_IP           0x200   /* Invalidate L1P bit */
+
+static void __iomem *cache_base;
+
+/*
+ * L1 & L2 caches generic functions
+ */
+#define imcr_get(reg) soc_readl(cache_base + (reg))
+#define imcr_set(reg, value) \
+do {                                                           \
+       soc_writel((value), cache_base + (reg));                \
+       soc_readl(cache_base + (reg));                          \
+} while (0)
+
+static void cache_block_operation_wait(unsigned int wc_reg)
+{
+       /* Wait for completion */
+       while (imcr_get(wc_reg))
+               cpu_relax();
+}
+
+static DEFINE_SPINLOCK(cache_lock);
+
+/*
+ * Generic function to perform a block cache operation as
+ * invalidate or writeback/invalidate
+ */
+static void cache_block_operation(unsigned int *start,
+                                 unsigned int *end,
+                                 unsigned int bar_reg,
+                                 unsigned int wc_reg)
+{
+       unsigned long flags;
+       unsigned int wcnt =
+               (L2_CACHE_ALIGN_CNT((unsigned int) end)
+                - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+       unsigned int wc = 0;
+
+       for (; wcnt; wcnt -= wc, start += wc) {
+loop:
+               spin_lock_irqsave(&cache_lock, flags);
+
+               /*
+                * If another cache operation is occurring
+                */
+               if (unlikely(imcr_get(wc_reg))) {
+                       spin_unlock_irqrestore(&cache_lock, flags);
+
+                       /* Wait for previous operation completion */
+                       cache_block_operation_wait(wc_reg);
+
+                       /* Try again */
+                       goto loop;
+               }
+
+               imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+               if (wcnt > 0xffff)
+                       wc = 0xffff;
+               else
+                       wc = wcnt;
+
+               /* Set word count value in the WC register */
+               imcr_set(wc_reg, wc & 0xffff);
+
+               spin_unlock_irqrestore(&cache_lock, flags);
+
+               /* Wait for completion */
+               cache_block_operation_wait(wc_reg);
+       }
+}
+
+static void cache_block_operation_nowait(unsigned int *start,
+                                        unsigned int *end,
+                                        unsigned int bar_reg,
+                                        unsigned int wc_reg)
+{
+       unsigned long flags;
+       unsigned int wcnt =
+               (L2_CACHE_ALIGN_CNT((unsigned int) end)
+                - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+       unsigned int wc = 0;
+
+       for (; wcnt; wcnt -= wc, start += wc) {
+
+               spin_lock_irqsave(&cache_lock, flags);
+
+               imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+               if (wcnt > 0xffff)
+                       wc = 0xffff;
+               else
+                       wc = wcnt;
+
+               /* Set word count value in the WC register */
+               imcr_set(wc_reg, wc & 0xffff);
+
+               spin_unlock_irqrestore(&cache_lock, flags);
+
+               /* Don't wait for completion on last cache operation */
+               if (wcnt > 0xffff)
+                       cache_block_operation_wait(wc_reg);
+       }
+}
+
+/*
+ * L1 caches management
+ */
+
+/*
+ * Disable L1 caches
+ */
+void L1_cache_off(void)
+{
+       unsigned int dummy;
+
+       imcr_set(IMCR_L1PCFG, 0);
+       dummy = imcr_get(IMCR_L1PCFG);
+
+       imcr_set(IMCR_L1DCFG, 0);
+       dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ * Enable L1 caches
+ */
+void L1_cache_on(void)
+{
+       unsigned int dummy;
+
+       imcr_set(IMCR_L1PCFG, 7);
+       dummy = imcr_get(IMCR_L1PCFG);
+
+       imcr_set(IMCR_L1DCFG, 7);
+       dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ *  L1P global-invalidate all
+ */
+void L1P_cache_global_invalidate(void)
+{
+       unsigned int set = 1;
+       imcr_set(IMCR_L1PINV, set);
+       while (imcr_get(IMCR_L1PINV) & 1)
+               cpu_relax();
+}
+
+/*
+ *  L1D global-invalidate all
+ *
+ * Warning: this operation causes all updated data in L1D to
+ * be discarded rather than written back to the lower levels of
+ * memory
+ */
+void L1D_cache_global_invalidate(void)
+{
+       unsigned int set = 1;
+       imcr_set(IMCR_L1DINV, set);
+       while (imcr_get(IMCR_L1DINV) & 1)
+               cpu_relax();
+}
+
+void L1D_cache_global_writeback(void)
+{
+       unsigned int set = 1;
+       imcr_set(IMCR_L1DWB, set);
+       while (imcr_get(IMCR_L1DWB) & 1)
+               cpu_relax();
+}
+
+void L1D_cache_global_writeback_invalidate(void)
+{
+       unsigned int set = 1;
+       imcr_set(IMCR_L1DWBINV, set);
+       while (imcr_get(IMCR_L1DWBINV) & 1)
+               cpu_relax();
+}
+
+/*
+ * L2 caches management
+ */
+
+/*
+ * Set L2 operation mode
+ */
+void L2_cache_set_mode(unsigned int mode)
+{
+       unsigned int ccfg = imcr_get(IMCR_CCFG);
+
+       /* Clear and set the L2MODE bits in CCFG */
+       ccfg &= ~7;
+       ccfg |= (mode & 7);
+       imcr_set(IMCR_CCFG, ccfg);
+       ccfg = imcr_get(IMCR_CCFG);
+}
+
+/*
+ *  L2 global-writeback and global-invalidate all
+ */
+void L2_cache_global_writeback_invalidate(void)
+{
+       imcr_set(IMCR_L2WBINV, 1);
+       while (imcr_get(IMCR_L2WBINV))
+               cpu_relax();
+}
+
+/*
+ *  L2 global-writeback all
+ */
+void L2_cache_global_writeback(void)
+{
+       imcr_set(IMCR_L2WB, 1);
+       while (imcr_get(IMCR_L2WB))
+               cpu_relax();
+}
+
+/*
+ * Cacheability controls
+ */
+void enable_caching(unsigned long start, unsigned long end)
+{
+       unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+       unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+       for (; mar <= mar_e; mar += 4)
+               imcr_set(mar, imcr_get(mar) | 1);
+}
+
+void disable_caching(unsigned long start, unsigned long end)
+{
+       unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+       unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+       for (; mar <= mar_e; mar += 4)
+               imcr_set(mar, imcr_get(mar) & ~1);
+}
+
+
+/*
+ *  L1 block operations
+ */
+void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L1PIBAR, IMCR_L1PIWC);
+}
+
+void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L1DIBAR, IMCR_L1DIWC);
+}
+
+void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L1DWIBAR, IMCR_L1DWIWC);
+}
+
+void L1D_cache_block_writeback(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L1DWBAR, IMCR_L1DWWC);
+}
+
+/*
+ *  L2 block operations
+ */
+void L2_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+       cache_block_operation((unsigned int *) start,
+                             (unsigned int *) end,
+                             IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
+{
+       cache_block_operation_nowait((unsigned int *) start,
+                                    (unsigned int *) end,
+                                    IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
+{
+       cache_block_operation_nowait((unsigned int *) start,
+                                    (unsigned int *) end,
+                                    IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+                                               unsigned int end)
+{
+       cache_block_operation_nowait((unsigned int *) start,
+                                    (unsigned int *) end,
+                                    IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+
+/*
+ * L1 and L2 caches configuration
+ */
+void __init c6x_cache_init(void)
+{
+       struct device_node *node;
+
+       node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
+       if (!node)
+               return;
+
+       cache_base = of_iomap(node, 0);
+
+       of_node_put(node);
+
+       if (!cache_base)
+               return;
+
+       /* Set L2 caches on the whole L2 SRAM memory */
+       L2_cache_set_mode(L2MODE_SIZE);
+
+       /* Enable L1 */
+       L1_cache_on();
+}
diff --git a/arch/c6x/platforms/dscr.c b/arch/c6x/platforms/dscr.c
new file mode 100644 (file)
index 0000000..f848a65
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ *  Device State Control Registers driver
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+/*
+ * The Device State Control Registers (DSCR) provide SoC level control over
+ * a number of peripherals. Details vary considerably among the various SoC
+ * parts. In general, the DSCR block will provide one or more configuration
+ * registers often protected by a lock register. One or more key values must
+ * be written to a lock register in order to unlock the configuration register.
+ * The configuration register may be used to enable (and disable in some
+ * cases) SoC pin drivers, peripheral clock sources (internal or pin), etc.
+ * In some cases, a configuration register is write once or the individual
+ * bits are write once. That is, you may be able to enable a device, but
+ * will not be able to disable it.
+ *
+ * In addition to device configuration, the DSCR block may provide registers
+ * which are used to reset SoC peripherals, provide device ID information,
+ * provide MAC addresses, and other miscellaneous functions.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+
+#define MAX_DEVSTATE_IDS   32
+#define MAX_DEVCTL_REGS     8
+#define MAX_DEVSTAT_REGS    8
+#define MAX_LOCKED_REGS     4
+#define MAX_SOC_EMACS       2
+
+struct rmii_reset_reg {
+       u32 reg;
+       u32 mask;
+};
+
+/*
+ * Some registers may be locked. In order to write to these
+ * registers, the key value must first be written to the lockreg.
+ */
+struct locked_reg {
+       u32 reg;        /* offset from base */
+       u32 lockreg;    /* offset from base */
+       u32 key;        /* unlock key */
+};
+
+/*
+ * This describes a contiguous area of like control bits used to enable/disable
+ * SoC devices. Each controllable device is given an ID which is used by the
+ * individual device drivers to control the device state. These IDs start at
+ * zero and are assigned sequentially to the control bitfield ranges described
+ * by this structure.
+ */
+struct devstate_ctl_reg {
+       u32 reg;                /* register holding the control bits */
+       u8  start_id;           /* start id of this range */
+       u8  num_ids;            /* number of devices in this range */
+       u8  enable_only;        /* bits are write-once to enable only */
+       u8  enable;             /* value used to enable device */
+       u8  disable;            /* value used to disable device */
+       u8  shift;              /* starting (rightmost) bit in range */
+       u8  nbits;              /* number of bits per device */
+};
+
+
+/*
+ * This describes a region of status bits indicating the state of
+ * various devices. This is used internally to wait for status
+ * change completion when enabling/disabling a device. Status is
+ * optional and not all device controls will have a corresponding
+ * status.
+ */
+struct devstate_stat_reg {
+       u32 reg;                /* register holding the status bits */
+       u8  start_id;           /* start id of this range */
+       u8  num_ids;            /* number of devices in this range */
+       u8  enable;             /* value indicating enabled state */
+       u8  disable;            /* value indicating disabled state */
+       u8  shift;              /* starting (rightmost) bit in range */
+       u8  nbits;              /* number of bits per device */
+};
+
+struct devstate_info {
+       struct devstate_ctl_reg *ctl;
+       struct devstate_stat_reg *stat;
+};
+
+/* These are callbacks to SOC-specific code. */
+struct dscr_ops {
+       void (*init)(struct device_node *node);
+};
+
+struct dscr_regs {
+       spinlock_t              lock;
+       void __iomem            *base;
+       u32                     kick_reg[2];
+       u32                     kick_key[2];
+       struct locked_reg       locked[MAX_LOCKED_REGS];
+       struct devstate_info    devstate_info[MAX_DEVSTATE_IDS];
+       struct rmii_reset_reg   rmii_resets[MAX_SOC_EMACS];
+       struct devstate_ctl_reg devctl[MAX_DEVCTL_REGS];
+       struct devstate_stat_reg devstat[MAX_DEVSTAT_REGS];
+};
+
+static struct dscr_regs        dscr;
+
+static struct locked_reg *find_locked_reg(u32 reg)
+{
+       int i;
+
+       for (i = 0; i < MAX_LOCKED_REGS; i++)
+               if (dscr.locked[i].key && reg == dscr.locked[i].reg)
+                       return &dscr.locked[i];
+       return NULL;
+}
+
+/*
+ * Write to a register with one lock
+ */
+static void dscr_write_locked1(u32 reg, u32 val,
+                              u32 lock, u32 key)
+{
+       void __iomem *reg_addr = dscr.base + reg;
+       void __iomem *lock_addr = dscr.base + lock;
+
+       /*
+        * For some registers, the lock is relocked after a short number
+        * of cycles. We have to put the lock write and register write in
+        * the same fetch packet to meet this timing. The .align ensures
+        * the two stw instructions are in the same fetch packet.
+        */
+       asm volatile ("b        .s2     0f\n"
+                     "nop      5\n"
+                     "    .align 5\n"
+                     "0:\n"
+                     "stw      .D1T2   %3,*%2\n"
+                     "stw      .D1T2   %1,*%0\n"
+                     :
+                     : "a"(reg_addr), "b"(val), "a"(lock_addr), "b"(key)
+               );
+
+       /* in case the hw doesn't reset the lock */
+       soc_writel(0, lock_addr);
+}
+
+/*
+ * Write to a register protected by two lock registers
+ */
+static void dscr_write_locked2(u32 reg, u32 val,
+                              u32 lock0, u32 key0,
+                              u32 lock1, u32 key1)
+{
+       soc_writel(key0, dscr.base + lock0);
+       soc_writel(key1, dscr.base + lock1);
+       soc_writel(val, dscr.base + reg);
+       soc_writel(0, dscr.base + lock0);
+       soc_writel(0, dscr.base + lock1);
+}
+
+static void dscr_write(u32 reg, u32 val)
+{
+       struct locked_reg *lock;
+
+       lock = find_locked_reg(reg);
+       if (lock)
+               dscr_write_locked1(reg, val, lock->lockreg, lock->key);
+       else if (dscr.kick_key[0])
+               dscr_write_locked2(reg, val, dscr.kick_reg[0], dscr.kick_key[0],
+                                  dscr.kick_reg[1], dscr.kick_key[1]);
+       else
+               soc_writel(val, dscr.base + reg);
+}
+
+
+/*
+ * Drivers can use this interface to enable/disable SoC IP blocks.
+ */
+void dscr_set_devstate(int id, enum dscr_devstate_t state)
+{
+       struct devstate_ctl_reg *ctl;
+       struct devstate_stat_reg *stat;
+       struct devstate_info *info;
+       u32 ctl_val, val;
+       int ctl_shift, ctl_mask;
+       unsigned long flags;
+
+       if (!dscr.base)
+               return;
+
+       if (id < 0 || id >= MAX_DEVSTATE_IDS)
+               return;
+
+       info = &dscr.devstate_info[id];
+       ctl = info->ctl;
+       stat = info->stat;
+
+       if (ctl == NULL)
+               return;
+
+       ctl_shift = ctl->shift + ctl->nbits * (id - ctl->start_id);
+       ctl_mask = ((1 << ctl->nbits) - 1) << ctl_shift;
+
+       switch (state) {
+       case DSCR_DEVSTATE_ENABLED:
+               ctl_val = ctl->enable << ctl_shift;
+               break;
+       case DSCR_DEVSTATE_DISABLED:
+               if (ctl->enable_only)
+                       return;
+               ctl_val = ctl->disable << ctl_shift;
+               break;
+       default:
+               return;
+       }
+
+       spin_lock_irqsave(&dscr.lock, flags);
+
+       val = soc_readl(dscr.base + ctl->reg);
+       val &= ~ctl_mask;
+       val |= ctl_val;
+
+       dscr_write(ctl->reg, val);
+
+       spin_unlock_irqrestore(&dscr.lock, flags);
+
+       if (!stat)
+               return;
+
+       ctl_shift = stat->shift + stat->nbits * (id - stat->start_id);
+
+       if (state == DSCR_DEVSTATE_ENABLED)
+               ctl_val = stat->enable;
+       else
+               ctl_val = stat->disable;
+
+       do {
+               val = soc_readl(dscr.base + stat->reg);
+               val >>= ctl_shift;
+               val &= ((1 << stat->nbits) - 1);
+       } while (val != ctl_val);
+}
+EXPORT_SYMBOL(dscr_set_devstate);
+
+/*
+ * Drivers can use this to reset RMII module.
+ */
+void dscr_rmii_reset(int id, int assert)
+{
+       struct rmii_reset_reg *r;
+       unsigned long flags;
+       u32 val;
+
+       if (id < 0 || id >= MAX_SOC_EMACS)
+               return;
+
+       r = &dscr.rmii_resets[id];
+       if (r->mask == 0)
+               return;
+
+       spin_lock_irqsave(&dscr.lock, flags);
+
+       val = soc_readl(dscr.base + r->reg);
+       if (assert)
+               dscr_write(r->reg, val | r->mask);
+       else
+               dscr_write(r->reg, val & ~(r->mask));
+
+       spin_unlock_irqrestore(&dscr.lock, flags);
+}
+EXPORT_SYMBOL(dscr_rmii_reset);
+
+static void __init dscr_parse_devstat(struct device_node *node,
+                                     void __iomem *base)
+{
+       u32 val;
+       int err;
+
+       err = of_property_read_u32_array(node, "ti,dscr-devstat", &val, 1);
+       if (!err)
+               c6x_devstat = soc_readl(base + val);
+       printk(KERN_INFO "DEVSTAT: %08x\n", c6x_devstat);
+}
+
+static void __init dscr_parse_silicon_rev(struct device_node *node,
+                                        void __iomem *base)
+{
+       u32 vals[3];
+       int err;
+
+       err = of_property_read_u32_array(node, "ti,dscr-silicon-rev", vals, 3);
+       if (!err) {
+               c6x_silicon_rev = soc_readl(base + vals[0]);
+               c6x_silicon_rev >>= vals[1];
+               c6x_silicon_rev &= vals[2];
+       }
+}
+
+/*
+ * Some SoCs will have a pair of fuse registers which hold
+ * an ethernet MAC address. The "ti,dscr-mac-fuse-regs"
+ * property is a mapping from fuse register bytes to MAC
+ * address bytes. The expected format is:
+ *
+ *     ti,dscr-mac-fuse-regs = <reg0 b3 b2 b1 b0
+ *                              reg1 b3 b2 b1 b0>
+ *
+ * reg0 and reg1 are the offsets of the two fuse registers.
+ * b3-b0 positionally represent bytes within the fuse register.
+ * b3 is the most significant byte and b0 is the least.
+ * Allowable values for b3-b0 are:
+ *
+ *       0 = fuse register byte not used in MAC address
+ *      1-6 = index+1 into c6x_fuse_mac[]
+ */
+/* See layout description above: two fuse registers, five cells each. */
+static void __init dscr_parse_mac_fuse(struct device_node *node,
+                                      void __iomem *base)
+{
+       u32 vals[10], fuse;
+       int f, i, j, err;
+
+       err = of_property_read_u32_array(node, "ti,dscr-mac-fuse-regs",
+                                        vals, 10);
+       if (err)
+               return;
+
+       for (f = 0; f < 2; f++) {
+               fuse = soc_readl(base + vals[f * 5]);
+               /* i walks fuse bytes MSB->LSB while j walks the b3..b0 cells */
+               for (j = (f * 5) + 1, i = 24; i >= 0; i -= 8, j++)
+                       if (vals[j] && vals[j] <= 6)
+                               c6x_fuse_mac[vals[j] - 1] = fuse >> i;
+       }
+}
+
+/*
+ * Record RMII reset reg/mask pairs from the "ti,dscr-rmii-resets"
+ * property for later use by dscr_rmii_reset(). At most MAX_SOC_EMACS
+ * pairs are kept; any extras are silently ignored.
+ */
+static void __init dscr_parse_rmii_resets(struct device_node *node,
+                                         void __iomem *base)
+{
+       const __be32 *p;
+       int i, size;
+
+       /* look for RMII reset registers */
+       p = of_get_property(node, "ti,dscr-rmii-resets", &size);
+       if (p) {
+               /* parse all the reg/mask pairs we can handle */
+               size /= (sizeof(*p) * 2);
+               if (size > MAX_SOC_EMACS)
+                       size = MAX_SOC_EMACS;
+
+               for (i = 0; i < size; i++) {
+                       dscr.rmii_resets[i].reg = be32_to_cpup(p++);
+                       dscr.rmii_resets[i].mask = be32_to_cpup(p++);
+               }
+       }
+}
+
+
+/*
+ * Apply the optional "ti,dscr-privperm" <reg val> pair: write the
+ * given value to the given DSCR register offset.
+ */
+static void __init dscr_parse_privperm(struct device_node *node,
+                                      void __iomem *base)
+{
+       u32 regval[2];
+
+       if (of_property_read_u32_array(node, "ti,dscr-privperm", regval, 2))
+               return;
+
+       dscr_write(regval[0], regval[1]);
+}
+
+/*
+ * SoCs may have "locked" DSCR registers which can only be written
+ * after first writing a key value to a lock register. These
+ * registers can be described with the "ti,dscr-locked-regs" property.
+ * This property provides a list of register descriptions with each
+ * description consisting of three values.
+ *
+ *     ti,dscr-locked-regs = <reg0 lockreg0 key0
+ *                               ...
+ *                             regN lockregN keyN>;
+ *
+ * reg is the offset of the locked register
+ * lockreg is the offset of the lock register
+ * key is the unlock key written to lockreg
+ *
+ */
+/* Parse "ti,dscr-locked-regs" <reg lockreg key> triplets (see above). */
+static void __init dscr_parse_locked_regs(struct device_node *node,
+                                         void __iomem *base)
+{
+       struct locked_reg *r;
+       const __be32 *p;
+       int i, size;
+
+       p = of_get_property(node, "ti,dscr-locked-regs", &size);
+       if (p) {
+               /* parse all the register descriptions we can handle */
+               size /= (sizeof(*p) * 3);
+               if (size > MAX_LOCKED_REGS)
+                       size = MAX_LOCKED_REGS;
+
+               for (i = 0; i < size; i++) {
+                       r = &dscr.locked[i];
+
+                       r->reg = be32_to_cpup(p++);
+                       r->lockreg = be32_to_cpup(p++);
+                       r->key = be32_to_cpup(p++);
+               }
+       }
+}
+
+/*
+ * SoCs may have DSCR registers which are only write enabled after
+ * writing specific key values to two registers. The two key registers
+ * and the key values can be parsed from a "ti,dscr-kick-regs"
+ * property with the following layout:
+ *
+ *     ti,dscr-kick-regs = <kickreg0 key0 kickreg1 key1>
+ *
+ * kickreg is the offset of the "kick" register
+ * key is the value which unlocks writing for protected regs
+ */
+/*
+ * Parse the optional "ti,dscr-kick-regs" property:
+ *
+ *     <kickreg0 key0 kickreg1 key1>
+ *
+ * recording each kick register offset and its unlock key.
+ */
+static void __init dscr_parse_kick_regs(struct device_node *node,
+                                       void __iomem *base)
+{
+       u32 kick[4];
+       int i;
+
+       if (of_property_read_u32_array(node, "ti,dscr-kick-regs", kick, 4))
+               return;
+
+       for (i = 0; i < 2; i++) {
+               dscr.kick_reg[i] = kick[i * 2];
+               dscr.kick_key[i] = kick[(i * 2) + 1];
+       }
+}
+
+
+/*
+ * SoCs may provide controls to enable/disable individual IP blocks. These
+ * controls in the DSCR usually control pin drivers but also may control
+ * clocking and or resets. The device tree is used to describe the bitfields
+ * in registers used to control device state. The number of bits and their
+ * values may vary even within the same register.
+ *
+ * The layout of these bitfields is described by the ti,dscr-devstate-ctl-regs
+ * property. This property is a list where each element describes a contiguous
+ * range of control fields with like properties. Each element of the list
+ * consists of 7 cells with the following values:
+ *
+ *   start_id num_ids reg enable disable start_bit nbits
+ *
+ * start_id is device id for the first device control in the range
+ * num_ids is the number of device controls in the range
+ * reg is the offset of the register holding the control bits
+ * enable is the value to enable a device
+ * disable is the value to disable a device (0xffffffff if cannot disable)
+ * start_bit is the bit number of the first bit in the range
+ * nbits is the number of bits per device control
+ */
+/* Parse "ti,dscr-devstate-ctl-regs" ranges (layout described above). */
+static void __init dscr_parse_devstate_ctl_regs(struct device_node *node,
+                                               void __iomem *base)
+{
+       struct devstate_ctl_reg *r;
+       const __be32 *p;
+       int i, j, size;
+
+       p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size);
+       if (p) {
+               /* parse all the ranges we can handle */
+               size /= (sizeof(*p) * 7);
+               if (size > MAX_DEVCTL_REGS)
+                       size = MAX_DEVCTL_REGS;
+
+               for (i = 0; i < size; i++) {
+                       r = &dscr.devctl[i];
+
+                       r->start_id = be32_to_cpup(p++);
+                       r->num_ids = be32_to_cpup(p++);
+                       r->reg = be32_to_cpup(p++);
+                       r->enable = be32_to_cpup(p++);
+                       r->disable = be32_to_cpup(p++);
+                       /* all-ones disable value means device cannot be disabled */
+                       if (r->disable == 0xffffffff)
+                               r->enable_only = 1;
+                       r->shift = be32_to_cpup(p++);
+                       r->nbits = be32_to_cpup(p++);
+
+                       /* point each covered device id at this control range */
+                       for (j = r->start_id;
+                            j < (r->start_id + r->num_ids);
+                            j++)
+                               dscr.devstate_info[j].ctl = r;
+               }
+       }
+}
+
+/*
+ * SoCs may provide status registers indicating the state (enabled/disabled) of
+ * devices on the SoC. The device tree is used to describe the bitfields in
+ * registers used to provide device status. The number of bits and their
+ * values used to provide status may vary even within the same register.
+ *
+ * The layout of these bitfields is described by the ti,dscr-devstate-stat-regs
+ * property. This property is a list where each element describes a contiguous
+ * range of status fields with like properties. Each element of the list
+ * consists of 7 cells with the following values:
+ *
+ *   start_id num_ids reg enable disable start_bit nbits
+ *
+ * start_id is device id for the first device status in the range
+ * num_ids is the number of devices covered by the range
+ * reg is the offset of the register holding the status bits
+ * enable is the value indicating device is enabled
+ * disable is the value indicating device is disabled
+ * start_bit is the bit number of the first bit in the range
+ * nbits is the number of bits per device status
+ */
+/* Parse "ti,dscr-devstate-stat-regs" ranges (layout described above). */
+static void __init dscr_parse_devstate_stat_regs(struct device_node *node,
+                                                void __iomem *base)
+{
+       struct devstate_stat_reg *r;
+       const __be32 *p;
+       int i, j, size;
+
+       p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size);
+       if (p) {
+               /* parse all the ranges we can handle */
+               size /= (sizeof(*p) * 7);
+               if (size > MAX_DEVSTAT_REGS)
+                       size = MAX_DEVSTAT_REGS;
+
+               for (i = 0; i < size; i++) {
+                       r = &dscr.devstat[i];
+
+                       r->start_id = be32_to_cpup(p++);
+                       r->num_ids = be32_to_cpup(p++);
+                       r->reg = be32_to_cpup(p++);
+                       r->enable = be32_to_cpup(p++);
+                       r->disable = be32_to_cpup(p++);
+                       r->shift = be32_to_cpup(p++);
+                       r->nbits = be32_to_cpup(p++);
+
+                       /* point each covered device id at this status range */
+                       for (j = r->start_id;
+                            j < (r->start_id + r->num_ids);
+                            j++)
+                               dscr.devstate_info[j].stat = r;
+               }
+       }
+}
+
+/* OF match table used by dscr_probe() to locate the DSCR node */
+static struct of_device_id dscr_ids[] __initdata = {
+       { .compatible = "ti,c64x+dscr" },
+       {}
+};
+
+/*
+ * Probe for DSCR area.
+ *
+ * This has to be done early on in case timer or interrupt controller
+ * needs something. e.g. On C6455 SoC, timer must be enabled through
+ * DSCR before it is functional.
+ */
+void __init dscr_probe(void)
+{
+       struct device_node *node;
+       void __iomem *base;
+
+       spin_lock_init(&dscr.lock);
+
+       node = of_find_matching_node(NULL, dscr_ids);
+       if (!node)
+               return;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               of_node_put(node);
+               return;
+       }
+
+       dscr.base = base;
+
+       /*
+        * Parse each optional DSCR description property.
+        * NOTE(review): the node reference is only dropped on the iomap
+        * failure path -- confirm holding it on success is intentional.
+        */
+       dscr_parse_devstat(node, base);
+       dscr_parse_silicon_rev(node, base);
+       dscr_parse_mac_fuse(node, base);
+       dscr_parse_rmii_resets(node, base);
+       dscr_parse_locked_regs(node, base);
+       dscr_parse_kick_regs(node, base);
+       dscr_parse_devstate_ctl_regs(node, base);
+       dscr_parse_devstate_stat_regs(node, base);
+       dscr_parse_privperm(node, base);
+}
diff --git a/arch/c6x/platforms/emif.c b/arch/c6x/platforms/emif.c
new file mode 100644 (file)
index 0000000..8b564de
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *  External Memory Interface
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+
+#define NUM_EMIFA_CHIP_ENABLES 4
+
+/*
+ * Memory-mapped EMIFA register layout; the reserved arrays pad the
+ * named registers out to their hardware offsets.
+ */
+struct emifa_regs {
+       u32     midr;
+       u32     stat;
+       u32     reserved1[6];
+       u32     bprio;
+       u32     reserved2[23];
+       u32     cecfg[NUM_EMIFA_CHIP_ENABLES];
+       u32     reserved3[4];
+       u32     awcc;
+       u32     reserved4[7];
+       u32     intraw;
+       u32     intmsk;
+       u32     intmskset;
+       u32     intmskclr;
+};
+
+/* OF match table used to locate the EMIFA node */
+static struct of_device_id emifa_match[] __initdata = {
+       { .compatible = "ti,c64x+emifa" },
+       {}
+};
+
+/*
+ * Parse device tree for existence of an EMIF (External Memory Interface)
+ * and initialize it if found.
+ */
+/*
+ * Parse device tree for existence of an EMIF (External Memory Interface)
+ * and initialize it if found.
+ *
+ * Optional properties:
+ *   ti,dscr-dev-enable          - DSCR device id enabling EMIFA pin buffers
+ *   ti,emifa-ce-config          - values for the chip-enable config regs
+ *   ti,emifa-burst-priority     - value for the BPRIO register
+ *   ti,emifa-async-wait-control - value for the AWCC register
+ */
+static int __init c6x_emifa_init(void)
+{
+       struct emifa_regs __iomem *regs;
+       struct device_node *node;
+       const __be32 *p;
+       u32 val;
+       int i, len, err;
+
+       node = of_find_matching_node(NULL, emifa_match);
+       if (!node)
+               return 0;
+
+       regs = of_iomap(node, 0);
+       if (!regs) {
+               /* was leaking the node reference on this path */
+               of_node_put(node);
+               return 0;
+       }
+
+       /* look for a dscr-based enable for emifa pin buffers */
+       err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1);
+       if (!err)
+               dscr_set_devstate(val, DSCR_DEVSTATE_ENABLED);
+
+       /* set up the chip enables */
+       p = of_get_property(node, "ti,emifa-ce-config", &len);
+       if (p) {
+               len /= sizeof(u32);
+               if (len > NUM_EMIFA_CHIP_ENABLES)
+                       len = NUM_EMIFA_CHIP_ENABLES;
+               /*
+                * Was "i <= len": that read one cell past the property and
+                * wrote one word past cecfg[] into the reserved space.
+                */
+               for (i = 0; i < len; i++)
+                       soc_writel(be32_to_cpup(&p[i]), &regs->cecfg[i]);
+       }
+
+       err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1);
+       if (!err)
+               soc_writel(val, &regs->bprio);
+
+       err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1);
+       if (!err)
+               soc_writel(val, &regs->awcc);
+
+       iounmap(regs);
+       of_node_put(node);
+       return 0;
+}
+pure_initcall(c6x_emifa_init);
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
new file mode 100644 (file)
index 0000000..7c37a94
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ *  Support for C64x+ Megamodule Interrupt Controller
+ *
+ *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ *  Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/soc.h>
+#include <asm/megamod-pic.h>
+
+#define NR_COMBINERS   4
+#define NR_MUX_OUTPUTS  12
+
+#define IRQ_UNMAPPED 0xffff
+
+/*
+ * Megamodule Interrupt Controller register layout
+ */
+struct megamod_regs {
+       /* each 8-word array is one bit per event: 8 * 32 = 256 events */
+       u32     evtflag[8];
+       u32     evtset[8];
+       u32     evtclr[8];
+       u32     reserved0[8];
+       u32     evtmask[8];
+       u32     mevtflag[8];
+       u32     expmask[8];
+       u32     mexpflag[8];
+       u32     intmux_unused;
+       u32     intmux[7];
+       u32     reserved1[8];
+       u32     aegmux[2];
+       u32     reserved2[14];
+       u32     intxstat;
+       u32     intxclr;
+       u32     intdmask;
+       u32     reserved3[13];
+       u32     evtasrt;
+};
+
+/* Per-controller state: irq host, register mapping and mux bookkeeping. */
+struct megamod_pic {
+       struct irq_host *irqhost;
+       struct megamod_regs __iomem *regs;
+       /* serializes read-modify-write of the evtmask registers */
+       raw_spinlock_t lock;
+
+       /* hw mux mapping */
+       unsigned int output_to_irq[NR_MUX_OUTPUTS];
+};
+
+/* the single system-wide megamodule PIC instance */
+static struct megamod_pic *mm_pic;
+
+/* per-combiner context handed to the chained cascade handler */
+struct megamod_cascade_data {
+       struct megamod_pic *pic;
+       int index;
+};
+
+static struct megamod_cascade_data cascade_data[NR_COMBINERS];
+
+/* irq_chip .irq_mask: set the event's bit in the matching EVTMASK word */
+static void mask_megamod(struct irq_data *data)
+{
+       struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
+       irq_hw_number_t src = irqd_to_hwirq(data);
+       u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
+
+       raw_spin_lock(&pic->lock);
+       soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
+       raw_spin_unlock(&pic->lock);
+}
+
+/* irq_chip .irq_unmask: clear the event's bit in the matching EVTMASK word */
+static void unmask_megamod(struct irq_data *data)
+{
+       struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
+       irq_hw_number_t src = irqd_to_hwirq(data);
+       u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
+
+       raw_spin_lock(&pic->lock);
+       soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
+       raw_spin_unlock(&pic->lock);
+}
+
+/* irq_chip shared by every megamodule event interrupt */
+static struct irq_chip megamod_chip = {
+       .name           = "megamod",
+       .irq_mask       = mask_megamod,
+       .irq_unmask     = unmask_megamod,
+};
+
+/*
+ * Chained handler for one combined interrupt: drain MEVTFLAG for this
+ * combiner, acking each pending event in EVTCLR and dispatching its
+ * mapped virq, until no events remain.
+ */
+static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+       struct megamod_cascade_data *cascade;
+       struct megamod_pic *pic;
+       u32 events;
+       int n, idx;
+
+       cascade = irq_desc_get_handler_data(desc);
+
+       pic = cascade->pic;
+       idx = cascade->index;
+
+       while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
+               n = __ffs(events);
+
+               irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);
+
+               /* ack the event before handling so a new edge is not lost */
+               soc_writel(1 << n, &pic->regs->evtclr[idx]);
+
+               generic_handle_irq(irq);
+       }
+}
+
+/*
+ * irq_host .map: bind a hwirq to its virq using the megamod irq_chip.
+ * Hwirqs that are muxed straight to a core priority interrupt are
+ * rejected; those are handled by the core controller instead.
+ */
+static int megamod_map(struct irq_host *h, unsigned int virq,
+                      irq_hw_number_t hw)
+{
+       struct megamod_pic *pic = h->host_data;
+       int i;
+
+       /* We shouldn't see a hwirq which is muxed to core controller */
+       for (i = 0; i < NR_MUX_OUTPUTS; i++)
+               if (pic->output_to_irq[i] == hw)
+                       return -1;
+
+       irq_set_chip_data(virq, pic);
+       irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);
+
+       /* Set default irq type */
+       irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+       return 0;
+}
+
+/*
+ * irq_host .xlate: translate a one-cell megamodule interrupt specifier
+ * into a hw irq number. The trigger type is always IRQ_TYPE_NONE.
+ */
+static int megamod_xlate(struct irq_host *h, struct device_node *ct,
+                        const u32 *intspec, unsigned int intsize,
+                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+       /* megamod intspecs must have 1 cell */
+       BUG_ON(intsize != 1);
+       *out_type = IRQ_TYPE_NONE;
+       *out_hwirq = intspec[0];
+       return 0;
+}
+
+/* irq_host callbacks for the megamodule PIC */
+static struct irq_host_ops megamod_host_ops = {
+       .map    = megamod_map,
+       .xlate  = megamod_xlate,
+};
+
+/*
+ * Route megamodule event 'src' to core interrupt 'output' via the
+ * INTMUX registers (8 bits per mapping, four mappings per register).
+ * An out-of-range src instead marks the output as unmapped.
+ */
+static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
+{
+       int index, offset;
+       u32 val;
+
+       if (src < 0 || src >= (NR_COMBINERS * 32)) {
+               pic->output_to_irq[output] = IRQ_UNMAPPED;
+               return;
+       }
+
+       /* four mappings per mux register */
+       index = output / 4;
+       offset = (output & 3) * 8;
+
+       val = soc_readl(&pic->regs->intmux[index]);
+       val &= ~(0xff << offset);
+       val |= src << offset;
+       soc_writel(val, &pic->regs->intmux[index]);
+}
+
+/*
+ * Parse the MUX mapping, if one exists.
+ *
+ * The MUX map is an array of up to 12 cells; one for each usable core priority
+ * interrupt. The value of a given cell is the megamodule interrupt source
+ * which is to be MUXed to the output corresponding to the cell position
+ * within the array. The first cell in the array corresponds to priority
+ * 4 and the last (12th) cell corresponds to priority 15. The allowed
+ * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
+ * sources (0 - 3) are not allowed to be mapped through this property. They
+ * are handled through the "interrupts" property. This allows us to use a
+ * value of zero as a "do not map" placeholder.
+ */
+static void __init parse_priority_map(struct megamod_pic *pic,
+                                     int *mapping, int size)
+{
+       struct device_node *np = pic->irqhost->of_node;
+       const __be32 *map;
+       int i, maplen;
+       u32 val;
+
+       map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
+       if (map) {
+               /* property length is in bytes; clamp to the caller's array */
+               maplen /= 4;
+               if (maplen > size)
+                       maplen = size;
+
+               for (i = 0; i < maplen; i++) {
+                       val = be32_to_cpup(map);
+                       /* skip zero placeholders and combined sources (< 4) */
+                       if (val && val >= 4)
+                               mapping[i] = val;
+                       ++map;
+               }
+       }
+}
+
+/*
+ * Allocate and set up the megamodule PIC: create a linear irq host
+ * covering all NR_COMBINERS * 32 events, parse the optional MUX map,
+ * hook up the combined-interrupt cascades and program the INTMUX
+ * registers. Returns NULL on any allocation/mapping failure.
+ * NOTE(review): on iomap failure the irq host allocated earlier is not
+ * torn down -- confirm there is no matching free for irq_alloc_host.
+ */
+static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
+{
+       struct megamod_pic *pic;
+       int i, irq;
+       int mapping[NR_MUX_OUTPUTS];
+
+       pr_info("Initializing C64x+ Megamodule PIC\n");
+
+       pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
+       if (!pic) {
+               pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+               return NULL;
+       }
+
+       pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
+                                     NR_COMBINERS * 32, &megamod_host_ops,
+                                     IRQ_UNMAPPED);
+       if (!pic->irqhost) {
+               pr_err("%s: Could not alloc host.\n", np->full_name);
+               goto error_free;
+       }
+
+       pic->irqhost->host_data = pic;
+
+       raw_spin_lock_init(&pic->lock);
+
+       pic->regs = of_iomap(np, 0);
+       if (!pic->regs) {
+               pr_err("%s: Could not map registers.\n", np->full_name);
+               goto error_free;
+       }
+
+       /* Initialize MUX map */
+       for (i = 0; i < ARRAY_SIZE(mapping); i++)
+               mapping[i] = IRQ_UNMAPPED;
+
+       parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));
+
+       /*
+        * We can have up to 12 interrupts cascading to the core controller.
+        * These cascades can be from the combined interrupt sources or for
+        * individual interrupt sources. The "interrupts" property only
+        * deals with the cascaded combined interrupts. The individual
+        * interrupts muxed to the core controller use the core controller
+        * as their interrupt parent.
+        */
+       for (i = 0; i < NR_COMBINERS; i++) {
+
+               irq = irq_of_parse_and_map(np, i);
+               if (irq == NO_IRQ)
+                       continue;
+
+               /*
+                * We count on the core priority interrupts (4 - 15) being
+                * direct mapped. Check that device tree provided something
+                * in that range.
+                */
+               if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
+                       pr_err("%s: combiner-%d virq %d out of range!\n",
+                                np->full_name, i, irq);
+                       continue;
+               }
+
+               /* record the mapping */
+               mapping[irq - 4] = i;
+
+               pr_debug("%s: combiner-%d cascading to virq %d\n",
+                        np->full_name, i, irq);
+
+               cascade_data[i].pic = pic;
+               cascade_data[i].index = i;
+
+               /* mask and clear all events in combiner */
+               soc_writel(~0, &pic->regs->evtmask[i]);
+               soc_writel(~0, &pic->regs->evtclr[i]);
+
+               irq_set_handler_data(irq, &cascade_data[i]);
+               irq_set_chained_handler(irq, megamod_irq_cascade);
+       }
+
+       /* Finally, set up the MUX registers */
+       for (i = 0; i < NR_MUX_OUTPUTS; i++) {
+               if (mapping[i] != IRQ_UNMAPPED) {
+                       pr_debug("%s: setting mux %d to priority %d\n",
+                                np->full_name, mapping[i], i + 4);
+                       set_megamod_mux(pic, mapping[i], i);
+               }
+       }
+
+       return pic;
+
+error_free:
+       kfree(pic);
+
+       return NULL;
+}
+
+/*
+ * Return next active event after ACK'ing it.
+ * Return -1 if no events active.
+ */
+static int get_exception(void)
+{
+       int i, bit;
+       u32 mask;
+
+       /* scan the combiners for the first pending exception event */
+       for (i = 0; i < NR_COMBINERS; i++) {
+               mask = soc_readl(&mm_pic->regs->mexpflag[i]);
+               if (mask) {
+                       bit = __ffs(mask);
+                       /* ack it so the same event isn't returned again */
+                       soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
+                       return (i * 32) + bit;
+               }
+       }
+       return -1;
+}
+
+/* Software-assert a megamodule event by writing its number to EVTASRT */
+static void assert_event(unsigned int val)
+{
+       soc_writel(val, &mm_pic->regs->evtasrt);
+}
+
+/*
+ * Locate the megamodule PIC node, initialize the controller and hook
+ * the exception helpers into soc_ops. Silently does nothing when the
+ * node is absent or initialization fails.
+ */
+void __init megamod_pic_init(void)
+{
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
+       if (!np)
+               return;
+
+       mm_pic = init_megamod_pic(np);
+       of_node_put(np);
+
+       /*
+        * Only install the hooks if the PIC actually came up; otherwise
+        * get_exception()/assert_event() would dereference a NULL mm_pic.
+        */
+       if (!mm_pic)
+               return;
+
+       soc_ops.get_exception = get_exception;
+       soc_ops.assert_event = assert_event;
+}
diff --git a/arch/c6x/platforms/platform.c b/arch/c6x/platforms/platform.c
new file mode 100644 (file)
index 0000000..26c1a35
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2011 Texas Instruments Incorporated
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+
+/* Populate platform devices from the device tree at core_initcall time */
+static int __init c6x_device_probe(void)
+{
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+       return 0;
+}
+core_initcall(c6x_device_probe);
diff --git a/arch/c6x/platforms/pll.c b/arch/c6x/platforms/pll.c
new file mode 100644 (file)
index 0000000..3aa898f
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * Clock and PLL control for C64x+ devices
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments.
+ * Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ * Copied heavily from arm/mach-davinci/clock.c, so:
+ *
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <asm/clock.h>
+#include <asm/soc.h>
+
+/*
+ * All registered clocks; list membership is guarded by clocks_mutex,
+ * while clockfw_lock protects usecount/rate updates in atomic context.
+ */
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+static DEFINE_SPINLOCK(clockfw_lock);
+
+/* Bump usecount on clk and all its ancestors; caller holds clockfw_lock. */
+static void __clk_enable(struct clk *clk)
+{
+       if (clk->parent)
+               __clk_enable(clk->parent);
+       clk->usecount++;
+}
+
+/*
+ * Drop usecount on clk and all its ancestors; caller holds clockfw_lock.
+ * WARNs and bails out on usecount underflow.
+ */
+static void __clk_disable(struct clk *clk)
+{
+       if (WARN_ON(clk->usecount == 0))
+               return;
+       --clk->usecount;
+
+       if (clk->parent)
+               __clk_disable(clk->parent);
+}
+
+/*
+ * clk_enable - mark a clock (and its parents) as in use.
+ * Returns -EINVAL for a NULL or error pointer. Only reference counting
+ * is performed here; no hardware state is touched.
+ */
+int clk_enable(struct clk *clk)
+{
+       unsigned long flags;
+
+       if (clk == NULL || IS_ERR(clk))
+               return -EINVAL;
+
+       spin_lock_irqsave(&clockfw_lock, flags);
+       __clk_enable(clk);
+       spin_unlock_irqrestore(&clockfw_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+/*
+ * clk_disable - drop a use reference on a clock (and its parents).
+ * Silently ignores NULL/error pointers.
+ */
+void clk_disable(struct clk *clk)
+{
+       unsigned long flags;
+
+       if (clk == NULL || IS_ERR(clk))
+               return;
+
+       spin_lock_irqsave(&clockfw_lock, flags);
+       __clk_disable(clk);
+       spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+/*
+ * clk_get_rate - return the cached rate of a clock.
+ * Returns -EINVAL (cast to unsigned long) for a NULL or error pointer.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+       if (IS_ERR(clk) || clk == NULL)
+               return -EINVAL;
+       return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/*
+ * clk_round_rate - ask what rate the clock would actually use.
+ * Delegates to the clock's round_rate hook when present, otherwise the
+ * current rate is returned unchanged. -EINVAL for NULL/error clocks.
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       if (clk == NULL || IS_ERR(clk))
+               return -EINVAL;
+       return clk->round_rate ? clk->round_rate(clk, rate) : clk->rate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+/*
+ * Propagate rate to children: recursively recalculate the rate of every
+ * descendant of root. Caller holds clockfw_lock.
+ */
+static void propagate_rate(struct clk *root)
+{
+       struct clk *clk;
+
+       list_for_each_entry(clk, &root->children, childnode) {
+               if (clk->recalc)
+                       clk->rate = clk->recalc(clk);
+               propagate_rate(clk);
+       }
+}
+
+/*
+ * clk_set_rate - change a clock's rate via its set_rate hook.
+ * On success the clock's own rate is recalculated and the new rate is
+ * propagated to its children. Returns -EINVAL when the clock is invalid
+ * or has no set_rate hook.
+ * NOTE(review): the set_rate hook runs outside clockfw_lock -- confirm
+ * hooks provide their own serialization.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       if (clk == NULL || IS_ERR(clk))
+               return ret;
+
+       if (clk->set_rate)
+               ret = clk->set_rate(clk, rate);
+
+       spin_lock_irqsave(&clockfw_lock, flags);
+       if (ret == 0) {
+               if (clk->recalc)
+                       clk->rate = clk->recalc(clk);
+               propagate_rate(clk);
+       }
+       spin_unlock_irqrestore(&clockfw_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+/*
+ * clk_set_parent - reparent a clock and recalculate/propagate its rate.
+ * Refuses (with WARN) to move a clock that is currently enabled.
+ * NOTE(review): parent is dereferenced unconditionally below; a NULL
+ * parent would oops -- confirm callers never pass NULL.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       unsigned long flags;
+
+       if (clk == NULL || IS_ERR(clk))
+               return -EINVAL;
+
+       /* Cannot change parent on enabled clock */
+       if (WARN_ON(clk->usecount))
+               return -EINVAL;
+
+       mutex_lock(&clocks_mutex);
+       clk->parent = parent;
+       list_del_init(&clk->childnode);
+       list_add(&clk->childnode, &clk->parent->children);
+       mutex_unlock(&clocks_mutex);
+
+       spin_lock_irqsave(&clockfw_lock, flags);
+       if (clk->recalc)
+               clk->rate = clk->recalc(clk);
+       propagate_rate(clk);
+       spin_unlock_irqrestore(&clockfw_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+/*
+ * clk_register - add a clock to the global list (and to its parent's
+ * children when it has one), then establish its initial rate: keep a
+ * preset rate, else use the recalc hook, else inherit the parent rate.
+ * Returns -EINVAL for invalid clocks or a parent with no rate.
+ */
+int clk_register(struct clk *clk)
+{
+       if (clk == NULL || IS_ERR(clk))
+               return -EINVAL;
+
+       if (WARN(clk->parent && !clk->parent->rate,
+                "CLK: %s parent %s has no rate!\n",
+                clk->name, clk->parent->name))
+               return -EINVAL;
+
+       mutex_lock(&clocks_mutex);
+       list_add_tail(&clk->node, &clocks);
+       if (clk->parent)
+               list_add_tail(&clk->childnode, &clk->parent->children);
+       mutex_unlock(&clocks_mutex);
+
+       /* If rate is already set, use it */
+       if (clk->rate)
+               return 0;
+
+       /* Else, see if there is a way to calculate it */
+       if (clk->recalc)
+               clk->rate = clk->recalc(clk);
+
+       /* Otherwise, default to parent rate */
+       else if (clk->parent)
+               clk->rate = clk->parent->rate;
+
+       return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+/* clk_unregister - remove a clock from the global and sibling lists. */
+void clk_unregister(struct clk *clk)
+{
+       if (clk == NULL || IS_ERR(clk))
+               return;
+
+       mutex_lock(&clocks_mutex);
+       list_del(&clk->node);
+       list_del(&clk->childnode);
+       mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+
+/* Read a 32-bit PLL controller register at byte offset reg */
+static u32 pll_read(struct pll_data *pll, int reg)
+{
+       return soc_readl(pll->base + reg);
+}
+
+/*
+ * Recalculate the rate of a PLL-derived sysclk: start from the parent
+ * PLL rate (or the pre-PLL input rate), then apply either a fixed
+ * divider or the ratio programmed in the clock's PLLDIV register.
+ */
+static unsigned long clk_sysclk_recalc(struct clk *clk)
+{
+       u32 v, plldiv = 0;
+       struct pll_data *pll;
+       unsigned long rate = clk->rate;
+
+       if (WARN_ON(!clk->parent))
+               return rate;
+
+       rate = clk->parent->rate;
+
+       /* the parent must be a PLL */
+       if (WARN_ON(!clk->parent->pll_data))
+               return rate;
+
+       pll = clk->parent->pll_data;
+
+       /* If pre-PLL, source clock is before the multiplier and divider(s) */
+       if (clk->flags & PRE_PLL)
+               rate = pll->input_rate;
+
+       if (!clk->div) {
+               pr_debug("%s: (no divider) rate = %lu KHz\n",
+                        clk->name, rate / 1000);
+               return rate;
+       }
+
+       if (clk->flags & FIXED_DIV_PLL) {
+               rate /= clk->div;
+               pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n",
+                        clk->name, clk->div, rate / 1000);
+               return rate;
+       }
+
+       /* clk->div holds the PLLDIV register offset in this case */
+       v = pll_read(pll, clk->div);
+       if (v & PLLDIV_EN)
+               plldiv = (v & PLLDIV_RATIO_MASK) + 1;
+
+       if (plldiv == 0)
+               plldiv = 1;
+
+       rate /= plldiv;
+
+       pr_debug("%s: (divide by %d) rate = %lu KHz\n",
+                clk->name, plldiv, rate / 1000);
+
+       return rate;
+}
+
+/* Leaf clocks simply follow their parent's rate. */
+static unsigned long clk_leafclk_recalc(struct clk *clk)
+{
+       if (WARN_ON(!clk->parent))
+               return clk->rate;
+
+       pr_debug("%s: (parent %s) rate = %lu KHz\n",
+                clk->name, clk->parent->name,  clk->parent->rate / 1000);
+
+       return clk->parent->rate;
+}
+
+/*
+ * Recalculate a PLL output clock's rate from PLLCTL, PLLM and the
+ * optional pre/post dividers. In bypass mode (PLLEN clear) the input
+ * rate passes through unchanged.
+ */
+static unsigned long clk_pllclk_recalc(struct clk *clk)
+{
+       u32 ctrl, mult = 0, prediv = 0, postdiv = 0;
+       u8 bypass;
+       struct pll_data *pll = clk->pll_data;
+       unsigned long rate = clk->rate;
+
+       if (clk->flags & FIXED_RATE_PLL)
+               return rate;
+
+       ctrl = pll_read(pll, PLLCTL);
+       rate = pll->input_rate = clk->parent->rate;
+
+       if (ctrl & PLLCTL_PLLEN)
+               bypass = 0;
+       else
+               bypass = 1;
+
+       if (pll->flags & PLL_HAS_MUL) {
+               mult = pll_read(pll, PLLM);
+               mult = (mult & PLLM_PLLM_MASK) + 1;
+       }
+       if (pll->flags & PLL_HAS_PRE) {
+               prediv = pll_read(pll, PLLPRE);
+               if (prediv & PLLDIV_EN)
+                       prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
+               else
+                       prediv = 0;
+       }
+       if (pll->flags & PLL_HAS_POST) {
+               postdiv = pll_read(pll, PLLPOST);
+               if (postdiv & PLLDIV_EN)
+                       postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
+               else
+                       postdiv = 1;
+       }
+
+       if (!bypass) {
+               /* zero factors mean "not present/disabled" and are skipped */
+               if (prediv)
+                       rate /= prediv;
+               if (mult)
+                       rate *= mult;
+               if (postdiv)
+                       rate /= postdiv;
+
+               pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] "
+                        "--> %luMHz output.\n",
+                        pll->num, clk->parent->rate / 1000000,
+                        prediv, mult, postdiv, rate / 1000000);
+       } else
+               pr_debug("PLL%d: input = %luMHz, bypass mode.\n",
+                        pll->num, clk->parent->rate / 1000000);
+
+       return rate;
+}
+
+
+static void __init __init_clk(struct clk *clk)
+{
+       INIT_LIST_HEAD(&clk->node);
+       INIT_LIST_HEAD(&clk->children);
+       INIT_LIST_HEAD(&clk->childnode);
+
+       if (!clk->recalc) {
+
+               /* Check if clock is a PLL */
+               if (clk->pll_data)
+                       clk->recalc = clk_pllclk_recalc;
+
+               /* Else, if it is a PLL-derived clock */
+               else if (clk->flags & CLK_PLL)
+                       clk->recalc = clk_sysclk_recalc;
+
+               /* Otherwise, it is a leaf clock (PSC clock) */
+               else if (clk->parent)
+                       clk->recalc = clk_leafclk_recalc;
+       }
+}
+
+void __init c6x_clks_init(struct clk_lookup *clocks)
+{
+       struct clk_lookup *c;
+       struct clk *clk;
+       size_t num_clocks = 0;
+
+       for (c = clocks; c->clk; c++) {
+               clk = c->clk;
+
+               __init_clk(clk);
+               clk_register(clk);
+               num_clocks++;
+
+               /* Turn on clocks that Linux doesn't otherwise manage */
+               if (clk->flags & ALWAYS_ENABLED)
+                       clk_enable(clk);
+       }
+
+       clkdev_add_table(clocks, num_clocks);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CLKNAME_MAX    10              /* longest clock name */
+#define NEST_DELTA     2
+#define NEST_MAX       4
+
+static void
+dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
+{
+       char            *state;
+       char            buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
+       struct clk      *clk;
+       unsigned        i;
+
+       if (parent->flags & CLK_PLL)
+               state = "pll";
+       else
+               state = "";
+
+       /* <nest spaces> name <pad to end> */
+       memset(buf, ' ', sizeof(buf) - 1);
+       buf[sizeof(buf) - 1] = 0;
+       i = strlen(parent->name);
+       memcpy(buf + nest, parent->name,
+              min(i, (unsigned)(sizeof(buf) - 1 - nest)));
+
+       seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
+                  buf, parent->usecount, state, clk_get_rate(parent));
+       /* REVISIT show device associations too */
+
+       /* cost is now small, but not linear... */
+       list_for_each_entry(clk, &parent->children, childnode) {
+               dump_clock(s, nest + NEST_DELTA, clk);
+       }
+}
+
+static int c6x_ck_show(struct seq_file *m, void *v)
+{
+       struct clk *clk;
+
+       /*
+        * Show clock tree; We trust nonzero usecounts equate to PSC enables...
+        */
+       mutex_lock(&clocks_mutex);
+       list_for_each_entry(clk, &clocks, node)
+               if (!clk->parent)
+                       dump_clock(m, 0, clk);
+       mutex_unlock(&clocks_mutex);
+
+       return 0;
+}
+
+static int c6x_ck_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, c6x_ck_show, NULL);
+}
+
+static const struct file_operations c6x_ck_operations = {
+       .open           = c6x_ck_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int __init c6x_clk_debugfs_init(void)
+{
+       debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL,
+                           &c6x_ck_operations);
+
+       return 0;
+}
+device_initcall(c6x_clk_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
new file mode 100644 (file)
index 0000000..2cfd6f4
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/clock.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+/*
+ * Common SoC clock support.
+ */
+
+/* Default input for PLL1 */
+struct clk clkin1 = {
+       .name = "clkin1",
+       .node = LIST_HEAD_INIT(clkin1.node),
+       .children = LIST_HEAD_INIT(clkin1.children),
+       .childnode = LIST_HEAD_INIT(clkin1.childnode),
+};
+
+struct pll_data c6x_soc_pll1 = {
+       .num       = 1,
+       .sysclks   = {
+               {
+                       .name = "pll1",
+                       .parent = &clkin1,
+                       .pll_data = &c6x_soc_pll1,
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk1",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk2",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk3",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk4",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk5",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk6",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk7",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk8",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk9",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk10",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk11",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk12",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk13",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk14",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk15",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+               {
+                       .name = "pll1_sysclk16",
+                       .parent = &c6x_soc_pll1.sysclks[0],
+                       .flags = CLK_PLL,
+               },
+       },
+};
+
+/* CPU core clock */
+struct clk c6x_core_clk = {
+       .name = "core",
+};
+
+/* miscellaneous IO clocks */
+struct clk c6x_i2c_clk = {
+       .name = "i2c",
+};
+
+struct clk c6x_watchdog_clk = {
+       .name = "watchdog",
+};
+
+struct clk c6x_mcbsp1_clk = {
+       .name = "mcbsp1",
+};
+
+struct clk c6x_mcbsp2_clk = {
+       .name = "mcbsp2",
+};
+
+struct clk c6x_mdio_clk = {
+       .name = "mdio",
+};
+
+
+#ifdef CONFIG_SOC_TMS320C6455
+static struct clk_lookup c6455_clks[] = {
+       CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+       CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+       CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+       CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+       CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+       CLK(NULL, "core", &c6x_core_clk),
+       CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+       CLK("watchdog", NULL, &c6x_watchdog_clk),
+       CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+       CLK("", NULL, NULL)
+};
+
+
+static void __init c6455_setup_clocks(struct device_node *node)
+{
+       struct pll_data *pll = &c6x_soc_pll1;
+       struct clk *sysclks = pll->sysclks;
+
+       pll->flags = PLL_HAS_PRE | PLL_HAS_MUL;
+
+       sysclks[2].flags |= FIXED_DIV_PLL;
+       sysclks[2].div = 3;
+       sysclks[3].flags |= FIXED_DIV_PLL;
+       sysclks[3].div = 6;
+       sysclks[4].div = PLLDIV4;
+       sysclks[5].div = PLLDIV5;
+
+       c6x_core_clk.parent = &sysclks[0];
+       c6x_i2c_clk.parent = &sysclks[3];
+       c6x_watchdog_clk.parent = &sysclks[3];
+       c6x_mdio_clk.parent = &sysclks[3];
+
+       c6x_clks_init(c6455_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6455 */
+
+#ifdef CONFIG_SOC_TMS320C6457
+static struct clk_lookup c6457_clks[] = {
+       CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+       CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
+       CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+       CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+       CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+       CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+       CLK(NULL, "core", &c6x_core_clk),
+       CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+       CLK("watchdog", NULL, &c6x_watchdog_clk),
+       CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+       CLK("", NULL, NULL)
+};
+
+static void __init c6457_setup_clocks(struct device_node *node)
+{
+       struct pll_data *pll = &c6x_soc_pll1;
+       struct clk *sysclks = pll->sysclks;
+
+       pll->flags = PLL_HAS_MUL | PLL_HAS_POST;
+
+       sysclks[1].flags |= FIXED_DIV_PLL;
+       sysclks[1].div = 1;
+       sysclks[2].flags |= FIXED_DIV_PLL;
+       sysclks[2].div = 3;
+       sysclks[3].flags |= FIXED_DIV_PLL;
+       sysclks[3].div = 6;
+       sysclks[4].div = PLLDIV4;
+       sysclks[5].div = PLLDIV5;
+
+       c6x_core_clk.parent = &sysclks[1];
+       c6x_i2c_clk.parent = &sysclks[3];
+       c6x_watchdog_clk.parent = &sysclks[5];
+       c6x_mdio_clk.parent = &sysclks[5];
+
+       c6x_clks_init(c6457_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6457 */
+
+#ifdef CONFIG_SOC_TMS320C6472
+static struct clk_lookup c6472_clks[] = {
+       CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+       CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
+       CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+       CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+       CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+       CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+       CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
+       CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
+       CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
+       CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
+       CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
+       CLK(NULL, "core", &c6x_core_clk),
+       CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+       CLK("watchdog", NULL, &c6x_watchdog_clk),
+       CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+       CLK("", NULL, NULL)
+};
+
+/* assumptions used for delay loop calculations */
+#define MIN_CLKIN1_KHz 15625
+#define MAX_CORE_KHz   700000
+#define MIN_PLLOUT_KHz MIN_CLKIN1_KHz
+
+static void __init c6472_setup_clocks(struct device_node *node)
+{
+       struct pll_data *pll = &c6x_soc_pll1;
+       struct clk *sysclks = pll->sysclks;
+       int i;
+
+       pll->flags = PLL_HAS_MUL;
+
+       for (i = 1; i <= 6; i++) {
+               sysclks[i].flags |= FIXED_DIV_PLL;
+               sysclks[i].div = 1;
+       }
+
+       sysclks[7].flags |= FIXED_DIV_PLL;
+       sysclks[7].div = 3;
+       sysclks[8].flags |= FIXED_DIV_PLL;
+       sysclks[8].div = 6;
+       sysclks[9].flags |= FIXED_DIV_PLL;
+       sysclks[9].div = 2;
+       sysclks[10].div = PLLDIV10;
+
+       c6x_core_clk.parent = &sysclks[get_coreid() + 1];
+       c6x_i2c_clk.parent = &sysclks[8];
+       c6x_watchdog_clk.parent = &sysclks[8];
+       c6x_mdio_clk.parent = &sysclks[5];
+
+       c6x_clks_init(c6472_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6472 */
+
+
+#ifdef CONFIG_SOC_TMS320C6474
+static struct clk_lookup c6474_clks[] = {
+       CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+       CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
+       CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
+       CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
+       CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
+       CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]),
+       CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]),
+       CLK(NULL, "core", &c6x_core_clk),
+       CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+       CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk),
+       CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk),
+       CLK("watchdog", NULL, &c6x_watchdog_clk),
+       CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+       CLK("", NULL, NULL)
+};
+
+static void __init c6474_setup_clocks(struct device_node *node)
+{
+       struct pll_data *pll = &c6x_soc_pll1;
+       struct clk *sysclks = pll->sysclks;
+
+       pll->flags = PLL_HAS_MUL;
+
+       sysclks[7].flags |= FIXED_DIV_PLL;
+       sysclks[7].div = 1;
+       sysclks[9].flags |= FIXED_DIV_PLL;
+       sysclks[9].div = 3;
+       sysclks[10].flags |= FIXED_DIV_PLL;
+       sysclks[10].div = 6;
+
+       sysclks[11].div = PLLDIV11;
+
+       sysclks[12].flags |= FIXED_DIV_PLL;
+       sysclks[12].div = 2;
+
+       sysclks[13].div = PLLDIV13;
+
+       c6x_core_clk.parent = &sysclks[7];
+       c6x_i2c_clk.parent = &sysclks[10];
+       c6x_watchdog_clk.parent = &sysclks[10];
+       c6x_mcbsp1_clk.parent = &sysclks[10];
+       c6x_mcbsp2_clk.parent = &sysclks[10];
+
+       c6x_clks_init(c6474_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6474 */
+
+static struct of_device_id c6x_clkc_match[] __initdata = {
+#ifdef CONFIG_SOC_TMS320C6455
+       { .compatible = "ti,c6455-pll", .data = c6455_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6457
+       { .compatible = "ti,c6457-pll", .data = c6457_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6472
+       { .compatible = "ti,c6472-pll", .data = c6472_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6474
+       { .compatible = "ti,c6474-pll", .data = c6474_setup_clocks },
+#endif
+       { .compatible = "ti,c64x+pll" },
+       {}
+};
+
+void __init c64x_setup_clocks(void)
+{
+       void (*__setup_clocks)(struct device_node *np);
+       struct pll_data *pll = &c6x_soc_pll1;
+       struct device_node *node;
+       const struct of_device_id *id;
+       int err;
+       u32 val;
+
+       node = of_find_matching_node(NULL, c6x_clkc_match);
+       if (!node)
+               return;
+
+       pll->base = of_iomap(node, 0);
+       if (!pll->base)
+               goto out;
+
+       err = of_property_read_u32(node, "clock-frequency", &val);
+       if (err || val == 0) {
+               val = 25000000;
+               pr_err("%s: no clock-frequency found! Using %dMHz\n",
+                      node->full_name, (int)val / 1000000);
+       }
+       clkin1.rate = val;
+
+       err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val);
+       if (err)
+               val = 5000;
+       pll->bypass_delay = val;
+
+       err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val);
+       if (err)
+               val = 30000;
+       pll->reset_delay = val;
+
+       err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val);
+       if (err)
+               val = 30000;
+       pll->lock_delay = val;
+
+       /* id->data is a pointer to SoC-specific setup */
+       id = of_match_node(c6x_clkc_match, node);
+       if (id && id->data) {
+               __setup_clocks = id->data;
+               __setup_clocks(node);
+       }
+
+out:
+       of_node_put(node);
+}
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
new file mode 100644 (file)
index 0000000..03c03c2
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ *  Contributed by: Mark Salter (msalter@redhat.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+#include <asm/timer64.h>
+
+struct timer_regs {
+       u32     reserved0;
+       u32     emumgt;
+       u32     reserved1;
+       u32     reserved2;
+       u32     cntlo;
+       u32     cnthi;
+       u32     prdlo;
+       u32     prdhi;
+       u32     tcr;
+       u32     tgcr;
+       u32     wdtcr;
+};
+
+static struct timer_regs __iomem *timer;
+
+#define TCR_TSTATLO         0x001
+#define TCR_INVOUTPLO       0x002
+#define TCR_INVINPLO        0x004
+#define TCR_CPLO            0x008
+#define TCR_ENAMODELO_ONCE   0x040
+#define TCR_ENAMODELO_CONT   0x080
+#define TCR_ENAMODELO_MASK   0x0c0
+#define TCR_PWIDLO_MASK      0x030
+#define TCR_CLKSRCLO        0x100
+#define TCR_TIENLO          0x200
+#define TCR_TSTATHI         (0x001 << 16)
+#define TCR_INVOUTPHI       (0x002 << 16)
+#define TCR_CPHI            (0x008 << 16)
+#define TCR_PWIDHI_MASK      (0x030 << 16)
+#define TCR_ENAMODEHI_ONCE   (0x040 << 16)
+#define TCR_ENAMODEHI_CONT   (0x080 << 16)
+#define TCR_ENAMODEHI_MASK   (0x0c0 << 16)
+
+#define TGCR_TIMLORS        0x001
+#define TGCR_TIMHIRS        0x002
+#define TGCR_TIMMODE_UD32    0x004
+#define TGCR_TIMMODE_WDT64   0x008
+#define TGCR_TIMMODE_CD32    0x00c
+#define TGCR_TIMMODE_MASK    0x00c
+#define TGCR_PSCHI_MASK      (0x00f << 8)
+#define TGCR_TDDRHI_MASK     (0x00f << 12)
+
+/*
+ * Timer clocks are divided down from the CPU clock
+ * The divisor is in the EMUMGTCLKSPD register
+ */
+#define TIMER_DIVISOR \
+       ((soc_readl(&timer->emumgt) & (0xf << 16)) >> 16)
+
+#define TIMER64_RATE (c6x_core_freq / TIMER_DIVISOR)
+
+#define TIMER64_MODE_DISABLED 0
+#define TIMER64_MODE_ONE_SHOT TCR_ENAMODELO_ONCE
+#define TIMER64_MODE_PERIODIC TCR_ENAMODELO_CONT
+
+static int timer64_mode;
+static int timer64_devstate_id = -1;
+
+static void timer64_config(unsigned long period)
+{
+       u32 tcr = soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK;
+
+       soc_writel(tcr, &timer->tcr);
+       soc_writel(period - 1, &timer->prdlo);
+       soc_writel(0, &timer->cntlo);
+       tcr |= timer64_mode;
+       soc_writel(tcr, &timer->tcr);
+}
+
+static void timer64_enable(void)
+{
+       u32 val;
+
+       if (timer64_devstate_id >= 0)
+               dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
+
+       /* disable timer, reset count */
+       soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
+       soc_writel(0, &timer->prdlo);
+
+       /* use internal clock and 1 cycle pulse width */
+       val = soc_readl(&timer->tcr);
+       soc_writel(val & ~(TCR_CLKSRCLO | TCR_PWIDLO_MASK), &timer->tcr);
+
+       /* dual 32-bit unchained mode */
+       val = soc_readl(&timer->tgcr) & ~TGCR_TIMMODE_MASK;
+       soc_writel(val, &timer->tgcr);
+       soc_writel(val | (TGCR_TIMLORS | TGCR_TIMMODE_UD32), &timer->tgcr);
+}
+
+static void timer64_disable(void)
+{
+       /* disable timer, reset count */
+       soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
+       soc_writel(0, &timer->prdlo);
+
+       if (timer64_devstate_id >= 0)
+               dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_DISABLED);
+}
+
+static int next_event(unsigned long delta,
+                     struct clock_event_device *evt)
+{
+       timer64_config(delta);
+       return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+                          struct clock_event_device *evt)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               timer64_enable();
+               timer64_mode = TIMER64_MODE_PERIODIC;
+               timer64_config(TIMER64_RATE / HZ);
+               break;
+       case CLOCK_EVT_MODE_ONESHOT:
+               timer64_enable();
+               timer64_mode = TIMER64_MODE_ONE_SHOT;
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               timer64_mode = TIMER64_MODE_DISABLED;
+               timer64_disable();
+               break;
+       case CLOCK_EVT_MODE_RESUME:
+               break;
+       }
+}
+
+static struct clock_event_device t64_clockevent_device = {
+       .name           = "TIMER64_EVT32_TIMER",
+       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+       .rating         = 200,
+       .set_mode       = set_clock_mode,
+       .set_next_event = next_event,
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd = &t64_clockevent_device;
+
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction timer_iact = {
+       .name           = "timer",
+       .flags          = IRQF_TIMER,
+       .handler        = timer_interrupt,
+       .dev_id         = &t64_clockevent_device,
+};
+
+void __init timer64_init(void)
+{
+       struct clock_event_device *cd = &t64_clockevent_device;
+       struct device_node *np, *first = NULL;
+       u32 val;
+       int err, found = 0;
+
+       for_each_compatible_node(np, NULL, "ti,c64x+timer64") {
+               err = of_property_read_u32(np, "ti,core-mask", &val);
+               if (!err) {
+                       if (val & (1 << get_coreid())) {
+                               found = 1;
+                               break;
+                       }
+               } else if (!first)
+                       first = np;
+       }
+       if (!found) {
+               /* try first one with no core-mask */
+               if (first)
+                       np = of_node_get(first);
+               else {
+                       pr_debug("Cannot find ti,c64x+timer64 timer.\n");
+                       return;
+               }
+       }
+
+       timer = of_iomap(np, 0);
+       if (!timer) {
+               pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+               goto out;
+       }
+       pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+
+       cd->irq = irq_of_parse_and_map(np, 0);
+       if (cd->irq == NO_IRQ) {
+               pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+               iounmap(timer);
+               goto out;
+       }
+
+       /* If there is a device state control, save the ID. */
+       err = of_property_read_u32(np, "ti,dscr-dev-enable", &val);
+       if (!err) {
+               timer64_devstate_id = val;
+
+               /*
+                * It is necessary to enable the timer block here because
+                * the TIMER_DIVISOR macro needs to read a timer register
+                * to get the divisor.
+                */
+               dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
+       }
+
+       pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+
+       clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);
+
+       cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(250, cd);
+
+       cd->cpumask             = cpumask_of(smp_processor_id());
+
+       clockevents_register_device(cd);
+       setup_irq(cd->irq, &timer_iact);
+
+out:
+       of_node_put(np);
+       return;
+}
index 408b055c585f489e9f3a76c66f32da6680be8d91..b3abfb08aa5c3d32face736f2403fc753a0596c1 100644 (file)
@@ -19,10 +19,6 @@ config GENERIC_CMOS_UPDATE
 config ARCH_USES_GETTIMEOFFSET
        def_bool n
 
-config GENERIC_IOMAP
-       bool
-       default y
-
 config ARCH_HAS_ILOG2_U32
        bool
        default n
@@ -52,6 +48,7 @@ config CRIS
        select HAVE_IDE
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
+       select GENERIC_IOMAP
 
 config HZ
        int
index a2bde3744622fec40df210616e6aef82c76b3cc4..b34438e026be436d6c71632a250a23c187d9c6f7 100644 (file)
@@ -404,8 +404,7 @@ static int __init init_axis_flash(void)
                 */
                int blockstat;
                do {
-                       blockstat = main_mtd->block_isbad(main_mtd,
-                               ptable_sector);
+                       blockstat = mtd_block_isbad(main_mtd, ptable_sector);
                        if (blockstat < 0)
                                ptable_sector = 0; /* read error */
                        else if (blockstat)
@@ -413,8 +412,8 @@ static int __init init_axis_flash(void)
                } while (blockstat && ptable_sector);
 #endif
                if (ptable_sector) {
-                       main_mtd->read(main_mtd, ptable_sector, PAGESIZE,
-                               &len, page);
+                       mtd_read(main_mtd, ptable_sector, PAGESIZE, &len,
+                                page);
                        ptable_head = &((struct partitiontable *) page)->head;
                }
 
index c5e69abb4889f8956f1e4cd13466f3aaf34e94b4..bbbf7927f238005afee51476a6b9156f6c53f954 100644 (file)
@@ -317,6 +317,7 @@ config PCI
        bool "Use PCI"
        depends on MB93090_MB00
        default y
+       select GENERIC_PCI_IOMAP
        help
          Some FR-V systems (such as the MB93090-MB00 VDK) have PCI
          onboard. If you have one of these boards and you wish to use the PCI
index ca7475e73b5ee7f2673334011f86e5cb77ac62b1..8cb50a2fbcb2d9b34b6bad36323cb49513f64ceb 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/virtconvert.h>
 #include <asm/string.h>
 #include <asm/mb-regs.h>
+#include <asm-generic/pci_iomap.h>
 #include <linux/delay.h>
 
 /*
@@ -370,7 +371,6 @@ static inline void iowrite32_rep(void __iomem *p, const void *src, unsigned long
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 {
 }
index b73b542f8f48c32d9a771c5aa0be92a78d4cea00..21f1df1b378af0647664113f18734dd45ebb24c5 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 ifeq "$(CONFIG_PCI)" "y"
-obj-y := pci-frv.o pci-irq.o pci-vdk.o pci-iomap.o
+obj-y := pci-frv.o pci-irq.o pci-vdk.o
 
 ifeq "$(CONFIG_MMU)" "y"
 obj-y += pci-dma.o
diff --git a/arch/frv/mb93090-mb00/pci-iomap.c b/arch/frv/mb93090-mb00/pci-iomap.c
deleted file mode 100644 (file)
index 35f6df2..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* pci-iomap.c: description
- *
- * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/pci.h>
-#include <linux/module.h>
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-
-       if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM))
-               return (void __iomem *) start;
-
-       return NULL;
-}
-
-EXPORT_SYMBOL(pci_iomap);
index 02513c2dd5ec99a9e6d0b4c0e4613465c8110a32..9059e3905887196a48750cf3e531abf0aa6d96df 100644 (file)
@@ -26,6 +26,7 @@ config HEXAGON
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
        select NO_IOPORT
+       select GENERIC_IOMAP
        # mostly generic routines, with some accelerated ones
        ---help---
          Qualcomm Hexagon is a processor architecture designed for high
@@ -73,9 +74,6 @@ config GENERIC_CSUM
 config GENERIC_IRQ_PROBE
        def_bool y
 
-config GENERIC_IOMAP
-       def_bool y
-
 #config ZONE_DMA
 #      bool
 #      default y
index 3b7a7c483785929f0f126953756aa5f40c694dd0..bd7266903bf8f497b4ddfd416df6091335d17039 100644 (file)
@@ -32,6 +32,7 @@ config IA64
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
+       select GENERIC_IOMAP
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
@@ -105,10 +106,6 @@ config EFI
        bool
        default y
 
-config GENERIC_IOMAP
-       bool
-       default y
-
 config ARCH_CLOCKSOURCE_DATA
        def_bool y
 
index 105c93b00b1bc53ce22f04ffd249595b5001c4d2..b6a809fa2995fc989bc1175fca12ab483c28790e 100644 (file)
@@ -11,10 +11,12 @@ extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_pass_through;
 extern int iommu_detected;
+extern int iommu_group_mf;
 #else
 #define iommu_pass_through     (0)
 #define no_iommu               (1)
 #define iommu_detected         (0)
+#define iommu_group_mf         (0)
 #endif
 extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
index 1d2427d116e363dbecbb98559dd002cc9b67f4a6..fbb519828aa179e3ac75af8e8c2ea07fda944fc7 100644 (file)
@@ -71,7 +71,7 @@
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
 __DEFINE_GUEST_HANDLE(uint, unsigned int);
 __DEFINE_GUEST_HANDLE(ulong, unsigned long);
-__DEFINE_GUEST_HANDLE(u64, unsigned long);
+
 DEFINE_GUEST_HANDLE(char);
 DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
index c16162c70860a7ca1e878474b85348da233ad21d..eb11757200500f574aaf6665ea9c3f17b9a3d566 100644 (file)
@@ -33,6 +33,7 @@ int force_iommu __read_mostly;
 #endif
 
 int iommu_pass_through;
+int iommu_group_mf;
 
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
index 43f4c92816ef9760f68849611cea83ea8d0799aa..405052002493b8de11884c109afe9fcaf008aba2 100644 (file)
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
        return kvm;
 }
 
-struct kvm_io_range {
+struct kvm_ia64_io_range {
        unsigned long start;
        unsigned long size;
        unsigned long type;
 };
 
-static const struct kvm_io_range io_ranges[] = {
+static const struct kvm_ia64_io_range io_ranges[] = {
        {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
        {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
        {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
-       int i, j;
+       int j;
        unsigned long base_gfn;
 
        slots = kvm_memslots(kvm);
-       for (i = 0; i < slots->nmemslots; i++) {
-               memslot = &slots->memslots[i];
+       kvm_for_each_memslot(memslot, slots) {
                base_gfn = memslot->base_gfn;
-
                for (j = 0; j < memslot->npages; j++) {
                        if (memslot->rmap[j])
                                put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
index 81fdaa72c540e4315c2d9872d75ff0f4e17afba5..99c363617f270fe3c0b6eb9f64c43a3c0d6bf0a0 100644 (file)
@@ -37,9 +37,6 @@ config GENERIC_CALIBRATE_DELAY
        bool
        default y
 
-config GENERIC_IOMAP
-       def_bool MMU
-
 config GENERIC_CSUM
        bool
 
@@ -81,6 +78,7 @@ source "kernel/Kconfig.freezer"
 config MMU
        bool "MMU-based Paged Memory Management Support"
        default y
+       select GENERIC_IOMAP
        help
          Select if you want MMU-based virtualised addressing space
          support by paged memory management. If unsure, say 'Y'.
index e446bab2427bc7c69982190f3e89c0f442752780..f0eead74fff67d1c4eb0af6eae23d1fcb26720b0 100644 (file)
@@ -17,6 +17,7 @@ config MICROBLAZE
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
+       select GENERIC_PCI_IOMAP
 
 config SWAP
        def_bool n
index cc54187f3d383993e633306092061195469b1d88..a175132e44960a752f2aaf29953fb11b556e3499 100644 (file)
@@ -9,7 +9,14 @@
 #ifndef _ASM_MICROBLAZE_IRQ_H
 #define _ASM_MICROBLAZE_IRQ_H
 
-#define NR_IRQS 32
+
+/*
+ * Linux IRQ# is currently offset by one to map to the hardware
+ * irq number. So hardware IRQ0 maps to Linux irq 1.
+ */
+#define NO_IRQ_OFFSET  1
+#define IRQ_OFFSET     NO_IRQ_OFFSET
+#define NR_IRQS                (32 + IRQ_OFFSET)
 #include <asm-generic/irq.h>
 
 /* This type is the placeholder for a hardware interrupt number. It has to
@@ -20,8 +27,6 @@ typedef unsigned long irq_hw_number_t;
 
 extern unsigned int nr_irq;
 
-#define NO_IRQ (-1)
-
 struct pt_regs;
 extern void do_IRQ(struct pt_regs *regs);
 
index ed9d0f6e2cdb2be99cda93a20eb8987faf393a9c..a25e6b5e2ad454ec08020b3307ed02fbd89991b6 100644 (file)
@@ -174,15 +174,8 @@ extern int page_is_ram(unsigned long pfn);
 
 #define        virt_addr_valid(vaddr)  (pfn_valid(virt_to_pfn(vaddr)))
 
-
-#  ifndef CONFIG_MMU
-#  define __pa(vaddr)  ((unsigned long) (vaddr))
-#  define __va(paddr)  ((void *) (paddr))
-#  else /* CONFIG_MMU */
-#  define __pa(x)      __virt_to_phys((unsigned long)(x))
-#  define __va(x)      ((void *)__phys_to_virt((unsigned long)(x)))
-#  endif /* CONFIG_MMU */
-
+# define __pa(x)       __virt_to_phys((unsigned long)(x))
+# define __va(x)       ((void *)__phys_to_virt((unsigned long)(x)))
 
 /* Convert between virtual and physical address for MMU. */
 /* Handle MicroBlaze processor with virtual memory. */
index 904e5ef6a11b2e958f0b33c29b4f7d3794cecb38..6c72ed7eba9854c5b003555214d0c1376a544684 100644 (file)
@@ -26,12 +26,6 @@ int setup_early_printk(char *opt);
 void remap_early_printk(void);
 void disable_early_printk(void);
 
-#if defined(CONFIG_EARLY_PRINTK)
-#define eprintk early_printk
-#else
-#define eprintk printk
-#endif
-
 void heartbeat(void);
 void setup_heartbeat(void);
 
index 7d7092b917ac5ddb61664075aca7879ca95d6717..d20ffbc86bebcc4b319ce1ff52117ae5c029463a 100644 (file)
 #define __NR_clock_adjtime     373
 #define __NR_syncfs            374
 #define __NR_setns             375
+#define __NR_sendmmsg          376
+#define __NR_process_vm_readv  377
+#define __NR_process_vm_writev 378
 
-#define __NR_syscalls          376
+#define __NR_syscalls          379
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
index d26d92d47754a067481fa40eb78ef2eadba04317..8356e47631c4313199bb4487068ccd8ccf6635ac 100644 (file)
@@ -50,9 +50,9 @@ static void early_printk_uartlite_write(struct console *unused,
                                        const char *s, unsigned n)
 {
        while (*s && n-- > 0) {
-               early_printk_uartlite_putc(*s);
                if (*s == '\n')
                        early_printk_uartlite_putc('\r');
+               early_printk_uartlite_putc(*s);
                s++;
        }
 }
@@ -94,9 +94,9 @@ static void early_printk_uart16550_write(struct console *unused,
                                        const char *s, unsigned n)
 {
        while (*s && n-- > 0) {
-               early_printk_uart16550_putc(*s);
                if (*s == '\n')
                        early_printk_uart16550_putc('\r');
+               early_printk_uart16550_putc(*s);
                s++;
        }
 }
index ca15bc5c7449f9ba79ed9e024f88c1b13d49e60b..66e34a3bfe1b41fe9e333b996ab4e366692a480c 100644 (file)
@@ -468,7 +468,7 @@ C_ENTRY(sys_fork_wrapper):
        addi    r5, r0, SIGCHLD                 /* Arg 0: flags */
        lwi     r6, r1, PT_R1   /* Arg 1: child SP (use parent's) */
        addik   r7, r1, 0                       /* Arg 2: parent context */
-       add     r8. r0, r0                      /* Arg 3: (unused) */
+       add     r8, r0, r0                      /* Arg 3: (unused) */
        add     r9, r0, r0;                     /* Arg 4: (unused) */
        brid    do_fork         /* Do real work (tail-call) */
        add     r10, r0, r0;                    /* Arg 5: (unused) */
index eb41441c7fd0288a84e8f7e38c3dfddfae5b55bd..44b177e2ab124c7338c04eeafb81814bf63177c0 100644 (file)
@@ -42,8 +42,9 @@ unsigned int nr_irq;
 
 static void intc_enable_or_unmask(struct irq_data *d)
 {
-       unsigned long mask = 1 << d->irq;
-       pr_debug("enable_or_unmask: %d\n", d->irq);
+       unsigned long mask = 1 << d->hwirq;
+
+       pr_debug("enable_or_unmask: %ld\n", d->hwirq);
        out_be32(INTC_BASE + SIE, mask);
 
        /* ack level irqs because they can't be acked during
@@ -56,20 +57,21 @@ static void intc_enable_or_unmask(struct irq_data *d)
 
 static void intc_disable_or_mask(struct irq_data *d)
 {
-       pr_debug("disable: %d\n", d->irq);
-       out_be32(INTC_BASE + CIE, 1 << d->irq);
+       pr_debug("disable: %ld\n", d->hwirq);
+       out_be32(INTC_BASE + CIE, 1 << d->hwirq);
 }
 
 static void intc_ack(struct irq_data *d)
 {
-       pr_debug("ack: %d\n", d->irq);
-       out_be32(INTC_BASE + IAR, 1 << d->irq);
+       pr_debug("ack: %ld\n", d->hwirq);
+       out_be32(INTC_BASE + IAR, 1 << d->hwirq);
 }
 
 static void intc_mask_ack(struct irq_data *d)
 {
-       unsigned long mask = 1 << d->irq;
-       pr_debug("disable_and_ack: %d\n", d->irq);
+       unsigned long mask = 1 << d->hwirq;
+
+       pr_debug("disable_and_ack: %ld\n", d->hwirq);
        out_be32(INTC_BASE + CIE, mask);
        out_be32(INTC_BASE + IAR, mask);
 }
@@ -91,7 +93,7 @@ unsigned int get_irq(struct pt_regs *regs)
         * order to handle multiple interrupt controllers. It currently
         * is hardcoded to check for interrupts only on the first INTC.
         */
-       irq = in_be32(INTC_BASE + IVR);
+       irq = in_be32(INTC_BASE + IVR) + NO_IRQ_OFFSET;
        pr_debug("get_irq: %d\n", irq);
 
        return irq;
@@ -99,7 +101,7 @@ unsigned int get_irq(struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-       u32 i, j, intr_type;
+       u32 i, intr_mask;
        struct device_node *intc = NULL;
 #ifdef CONFIG_SELFMOD_INTC
        unsigned int intc_baseaddr = 0;
@@ -113,35 +115,24 @@ void __init init_IRQ(void)
                                0
                        };
 #endif
-       const char * const intc_list[] = {
-                               "xlnx,xps-intc-1.00.a",
-                               NULL
-                       };
-
-       for (j = 0; intc_list[j] != NULL; j++) {
-               intc = of_find_compatible_node(NULL, NULL, intc_list[j]);
-               if (intc)
-                       break;
-       }
+       intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a");
        BUG_ON(!intc);
 
-       intc_baseaddr = be32_to_cpup(of_get_property(intc,
-                                                               "reg", NULL));
+       intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL));
        intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
        nr_irq = be32_to_cpup(of_get_property(intc,
                                                "xlnx,num-intr-inputs", NULL));
 
-       intr_type =
-               be32_to_cpup(of_get_property(intc,
-                                               "xlnx,kind-of-intr", NULL));
-       if (intr_type > (u32)((1ULL << nr_irq) - 1))
+       intr_mask =
+               be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
+       if (intr_mask > (u32)((1ULL << nr_irq) - 1))
                printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
 
 #ifdef CONFIG_SELFMOD_INTC
        selfmod_function((int *) arr_func, intc_baseaddr);
 #endif
-       printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
-               intc_list[j], intc_baseaddr, nr_irq, intr_type);
+       printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
+               intc_baseaddr, nr_irq, intr_mask);
 
        /*
         * Disable all external interrupts until they are
@@ -155,8 +146,8 @@ void __init init_IRQ(void)
        /* Turn on the Master Enable. */
        out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
 
-       for (i = 0; i < nr_irq; ++i) {
-               if (intr_type & (0x00000001 << i)) {
+       for (i = IRQ_OFFSET; i < (nr_irq + IRQ_OFFSET); ++i) {
+               if (intr_mask & (0x00000001 << (i - IRQ_OFFSET))) {
                        irq_set_chip_and_handler_name(i, &intc_dev,
                                handle_edge_irq, "edge");
                        irq_clear_status_flags(i, IRQ_LEVEL);
@@ -165,5 +156,6 @@ void __init init_IRQ(void)
                                handle_level_irq, "level");
                        irq_set_status_flags(i, IRQ_LEVEL);
                }
+               irq_get_irq_data(i)->hwirq = i - IRQ_OFFSET;
        }
 }
index e5d63a89b9b20085e9e81042326c0e68bee362a6..bbebcae72c02f0d5b04598c3a57b1ea48c175aba 100644 (file)
@@ -33,11 +33,12 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        irq_enter();
        irq = get_irq(regs);
 next_irq:
-       BUG_ON(irq == -1U);
-       generic_handle_irq(irq);
+       BUG_ON(!irq);
+       /* Substract 1 because of get_irq */
+       generic_handle_irq(irq + IRQ_OFFSET - NO_IRQ_OFFSET);
 
        irq = get_irq(regs);
-       if (irq != -1U) {
+       if (irq) {
                pr_debug("next irq: %d\n", irq);
                ++concurrent_irq;
                goto next_irq;
@@ -52,13 +53,13 @@ next_irq:
   intc without any cascades or any connection that's why mapping is 1:1 */
 unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
 {
-       return hwirq;
+       return hwirq + IRQ_OFFSET;
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping);
 
 unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
 {
-       return intspec[0];
+       return intspec[0] + IRQ_OFFSET;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
index 142426f631bb7e34ded5faa048144fafcafb8e12..f39257a5abcf2652c9e853661773a49d7a0c49b9 100644 (file)
@@ -100,7 +100,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
                        break;
 
                case R_MICROBLAZE_64_NONE:
-                       pr_debug("R_MICROBLAZE_NONE\n");
+                       pr_debug("R_MICROBLAZE_64_NONE\n");
                        break;
 
                case R_MICROBLAZE_NONE:
index 0e654a12d37e18136caa965bfbbe729f87349d60..604cd9dd133362712e5220510be83be134d96ec1 100644 (file)
@@ -145,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
        setup_early_printk(NULL);
 #endif
 
-       eprintk("Ramdisk addr 0x%08x, ", ram);
+       printk("Ramdisk addr 0x%08x, ", ram);
        if (fdt)
-               eprintk("FDT at 0x%08x\n", fdt);
+               printk("FDT at 0x%08x\n", fdt);
        else
-               eprintk("Compiled-in FDT at 0x%08x\n",
+               printk("Compiled-in FDT at 0x%08x\n",
                                        (unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
-       eprintk("Found romfs @ 0x%08x (0x%08x)\n",
+       printk("Found romfs @ 0x%08x (0x%08x)\n",
                        romfs_base, romfs_size);
-       eprintk("#### klimit %p ####\n", old_klimit);
+       printk("#### klimit %p ####\n", old_klimit);
        BUG_ON(romfs_size < 0); /* What else can we do? */
 
-       eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+       printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
                        romfs_size, romfs_base, (unsigned)&_ebss);
 
-       eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
+       printk("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        if (msr)
-               eprintk("!!!Your kernel has setup MSR instruction but "
+               printk("!!!Your kernel has setup MSR instruction but "
                                "CPU don't have it %x\n", msr);
 #else
        if (!msr)
-               eprintk("!!!Your kernel not setup MSR instruction but "
+               printk("!!!Your kernel not setup MSR instruction but "
                                "CPU have it %x\n", msr);
 #endif
 
index 8789daa2a346683d43e7b42efe7935494b332e14..6a2b294ef6dc29525c75735bd7c82f001e5900a8 100644 (file)
@@ -380,3 +380,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns                 /* 375 */
+       .long sys_sendmmsg
+       .long sys_process_vm_readv
+       .long sys_process_vm_writev
index af74b1113aabb32fb3849a952109ffba07eacdcf..3cb0bf640135c5ced740bc2af6e0b6e819963792 100644 (file)
@@ -243,7 +243,7 @@ static int timer_initialized;
 
 void __init time_init(void)
 {
-       u32 irq, i = 0;
+       u32 irq;
        u32 timer_num = 1;
        struct device_node *timer = NULL;
        const void *prop;
@@ -258,33 +258,24 @@ void __init time_init(void)
                                0
                        };
 #endif
-       const char * const timer_list[] = {
-               "xlnx,xps-timer-1.00.a",
-               NULL
-       };
-
-       for (i = 0; timer_list[i] != NULL; i++) {
-               timer = of_find_compatible_node(NULL, NULL, timer_list[i]);
-               if (timer)
-                       break;
-       }
+       timer = of_find_compatible_node(NULL, NULL, "xlnx,xps-timer-1.00.a");
        BUG_ON(!timer);
 
        timer_baseaddr = be32_to_cpup(of_get_property(timer, "reg", NULL));
        timer_baseaddr = (unsigned long) ioremap(timer_baseaddr, PAGE_SIZE);
-       irq = be32_to_cpup(of_get_property(timer, "interrupts", NULL));
+       irq = irq_of_parse_and_map(timer, 0);
        timer_num = be32_to_cpup(of_get_property(timer,
                                                "xlnx,one-timer-only", NULL));
        if (timer_num) {
-               eprintk(KERN_EMERG "Please enable two timers in HW\n");
+               printk(KERN_EMERG "Please enable two timers in HW\n");
                BUG();
        }
 
 #ifdef CONFIG_SELFMOD_TIMER
        selfmod_function((int *) arr_func, timer_baseaddr);
 #endif
-       printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n",
-               timer_list[i], timer_baseaddr, irq);
+       printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n",
+               timer_baseaddr, irq);
 
        /* If there is clock-frequency property than use it */
        prop = of_get_property(timer, "clock-frequency", NULL);
index c13067b243c3925e486bef404a1ac80aba5ebc6a..844960e8ae189767f96eb5d2e6e79ed2ac3cab2e 100644 (file)
@@ -20,6 +20,7 @@ lib-y += uaccess_old.o
 
 lib-y += ashldi3.o
 lib-y += ashrdi3.o
+lib-y += cmpdi2.o
 lib-y += divsi3.o
 lib-y += lshrdi3.o
 lib-y += modsi3.o
diff --git a/arch/microblaze/lib/cmpdi2.c b/arch/microblaze/lib/cmpdi2.c
new file mode 100644 (file)
index 0000000..a708400
--- /dev/null
@@ -0,0 +1,26 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+word_type __cmpdi2(long long a, long long b)
+{
+       const DWunion au = {
+               .ll = a
+       };
+       const DWunion bu = {
+               .ll = b
+       };
+
+       if (au.s.high < bu.s.high)
+               return 0;
+       else if (au.s.high > bu.s.high)
+               return 2;
+
+       if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+               return 0;
+       else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+               return 2;
+
+       return 1;
+}
+EXPORT_SYMBOL(__cmpdi2);
index 57acda852f5a4723c7a294520b8fb6d51494cb54..b07abbac03197963c43011ceea69d2ec4c834b1f 100644 (file)
 #include <asm/io.h>
 #include <asm/pci-bridge.h>
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len)
-               return NULL;
-       if (max && len > max)
-               len = max;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM)
-               return ioremap(start, len);
-       /* What? */
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
        if (isa_vaddr_is_ioport(addr))
index db841c7b9d5bad1dcbf4c55cb54301ceb76dd460..0d71b2ed81074befc649e5d369ebfdafb13ccf1a 100644 (file)
@@ -242,7 +242,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
                         line, pin);
 
                virq = irq_create_mapping(NULL, line);
-               if (virq != NO_IRQ)
+               if (virq)
                        irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
        } else {
                pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
@@ -253,7 +253,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
                virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
                                             oirq.size);
        }
-       if (virq == NO_IRQ) {
+       if (!virq) {
                pr_debug(" Failed to map !\n");
                return -1;
        }
index a7636d3ddc6a373f1d4e2329e3b40d31057ab296..29d92187ff3019914c5fea55f317108dc5745958 100644 (file)
@@ -16,6 +16,7 @@ config MIPS
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_KPROBES
        select HAVE_KRETPROBES
+       select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select RTC_LIB if !MACH_LOONGSON
        select GENERIC_ATOMIC64 if !64BIT
        select HAVE_DMA_ATTRS
@@ -2316,6 +2317,7 @@ config PCI
        bool "Support for PCI controller"
        depends on HW_HAS_PCI
        select PCI_DOMAINS
+       select GENERIC_PCI_IOMAP
        help
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
index 40b223b603be588ea6702579cd658c170e2f0384..c22385400fc979cbeb1d6d9d249f2cab3404f48b 100644 (file)
@@ -834,10 +834,13 @@ static struct mtd_partition mtd_partitions[] = {
        }
 };
 
+static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
+
 static struct physmap_flash_data flash_data = {
        .width                  = 2,
        .nr_parts               = ARRAY_SIZE(mtd_partitions),
        .parts                  = mtd_partitions,
+       .part_probe_types       = bcm63xx_part_types,
 };
 
 static struct resource mtd_resources[] = {
index ed72e6a26b736d2686f0fc68f7e297be5c805250..1e6b587f62c940a513d357ab0064d103d3d974de 100644 (file)
@@ -16,7 +16,6 @@
 #define TAGINFO1_LEN           30      /* Length of vendor information field1 in tag */
 #define FLASHLAYOUTVER_LEN     4       /* Length of Flash Layout Version String tag */
 #define TAGINFO2_LEN           16      /* Length of vendor information field2 in tag */
-#define CRC_LEN                        4       /* Length of CRC in bytes */
 #define ALTTAGINFO_LEN         54      /* Alternate length for vendor information; Pirelli */
 
 #define NUM_PIRELLI            2
@@ -77,19 +76,19 @@ struct bcm_tag {
        /* 192-195: Version flash layout */
        char flash_layout_ver[FLASHLAYOUTVER_LEN];
        /* 196-199: kernel+rootfs CRC32 */
-       char fskernel_crc[CRC_LEN];
+       __u32 fskernel_crc;
        /* 200-215: Unused except on Alice Gate where is is information */
        char information2[TAGINFO2_LEN];
        /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
-       char image_crc[CRC_LEN];
+       __u32 image_crc;
        /* 220-223: CRC32 of rootfs partition */
-       char rootfs_crc[CRC_LEN];
+       __u32 rootfs_crc;
        /* 224-227: CRC32 of kernel partition */
-       char kernel_crc[CRC_LEN];
+       __u32 kernel_crc;
        /* 228-235: Unused at present */
        char reserved1[8];
        /* 236-239: CRC32 of header excluding last 20 bytes */
-       char header_crc[CRC_LEN];
+       __u32 header_crc;
        /* 240-255: Unused at present */
        char reserved2[16];
 };
index 2ab899c4b4ce1ac38a7b235f5c340af49dcc923c..2635b1a9633385568677b9a121f50adb6f96d8b7 100644 (file)
@@ -40,32 +40,6 @@ static void __iomem *ioport_map_pci(struct pci_dev *dev,
        return (void __iomem *) (ctrl->io_map_base + port);
 }
 
-/*
- * Create a virtual mapping cookie for a PCI BAR (memory or IO)
- */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map_pci(dev, start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       /* What? */
-       return NULL;
-}
-
-EXPORT_SYMBOL(pci_iomap);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        iounmap(addr);
index 438db84a1f7c71bee4bcd1ec17ecbfbaa295b637..8f1c40d5817ebb80e9bc818deed5c0b0d80f3ac7 100644 (file)
@@ -252,6 +252,7 @@ config PCI
        bool "Use PCI"
        depends on MN10300_UNIT_ASB2305
        default y
+       select GENERIC_PCI_IOMAP
        help
          Some systems (such as the ASB2305) have PCI onboard. If you have one
          of these boards and you wish to use the PCI facilities, say Y here.
index 787255da744e2647a592b1aea12e20f3e76f83f9..139df8c53de83374a86189a8278d91a184d3e26c 100644 (file)
@@ -229,7 +229,6 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 {
 }
@@ -251,15 +250,15 @@ static inline void *phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-static inline void *__ioremap(unsigned long offset, unsigned long size,
-                             unsigned long flags)
+static inline void __iomem *__ioremap(unsigned long offset, unsigned long size,
+                                     unsigned long flags)
 {
-       return (void *) offset;
+       return (void __iomem *) offset;
 }
 
-static inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
-       return (void *) offset;
+       return (void __iomem *) offset;
 }
 
 /*
@@ -267,14 +266,14 @@ static inline void *ioremap(unsigned long offset, unsigned long size)
  * area.  it's useful if some control registers are in such an area and write
  * combining or read caching is not desirable:
  */
-static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long offset, unsigned long size)
 {
-       return (void *) (offset | 0x20000000);
+       return (void __iomem *) (offset | 0x20000000);
 }
 
 #define ioremap_wc ioremap_nocache
 
-static inline void iounmap(void *addr)
+static inline void iounmap(void __iomem *addr)
 {
 }
 
index 0551022225b39905a7b5055541ec2afe8c7b0f91..cbc5abaa939a0f2e711c178cfede5d68cd9a312c 100644 (file)
@@ -5,4 +5,4 @@
 ###############################################################################
 obj-y   := unit-init.o leds.o
 
-obj-$(CONFIG_PCI) += pci.o pci-asb2305.o pci-irq.o pci-iomap.o
+obj-$(CONFIG_PCI) += pci.o pci-asb2305.o pci-irq.o
diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c
deleted file mode 100644 (file)
index c1a8d8f..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/* ASB2305 PCI I/O mapping handler
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/pci.h>
-#include <linux/module.h>
-
-/*
- * Create a virtual mapping cookie for a PCI BAR (memory or IO)
- */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-
-       if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM))
-               return (void __iomem *) start;
-
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
index e518a5a4cf4c19abf6f3fa537c478957602a86ad..081a54f1a93d23443f46c576f1031780f090a589 100644 (file)
@@ -38,9 +38,6 @@ config RWSEM_XCHGADD_ALGORITHM
 config GENERIC_HWEIGHT
        def_bool y
 
-config GENERIC_IOMAP
-       def_bool y
-
 config NO_IOPORT
        def_bool y
 
index fdfd8be29e951021140390d1ba37f1006d8f353d..242a1b7ac759448ba6ab06be9f28d4c9e9bfb69a 100644 (file)
@@ -14,6 +14,7 @@ config PARISC
        select GENERIC_ATOMIC64 if !64BIT
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
+       select GENERIC_PCI_IOMAP
        select IRQ_PER_CPU
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
index 8f470c93b16d10a94706f3f5375f2152db22e582..fb8e10a4fb39d5184b127a934308df1a7aac7900 100644 (file)
@@ -436,28 +436,6 @@ void ioport_unmap(void __iomem *addr)
        }
 }
 
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       /* What? */
-       return NULL;
-}
-
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        if (!INDIRECT_ADDR(addr)) {
@@ -483,5 +461,4 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
index 692ac7588e20d45afa608a7ae6d5f3a7638a315e..1919634a9b32c261c6f989f7c87b7403a5c6c57e 100644 (file)
@@ -718,6 +718,7 @@ config PCI
        default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
        default PCI_QSPAN if !4xx && !CPM2 && 8xx
        select ARCH_SUPPORTS_MSI
+       select GENERIC_PCI_IOMAP
        help
          Find out whether your system includes a PCI bus. PCI is the name of
          a bus system, i.e. the way the CPU talks to the other stuff inside
index 0ad432bc81d66259d82e4e94f691c46e4c765555..f7727d91ac6b91d26325fdbbae46cc9e7698cba2 100644 (file)
@@ -170,8 +170,8 @@ struct kvm_sregs {
                        } ppc64;
                        struct {
                                __u32 sr[16];
-                               __u64 ibat[8]; 
-                               __u64 dbat[8]; 
+                               __u64 ibat[8];
+                               __u64 dbat[8];
                        } ppc32;
                } s;
                struct {
index 262791807397375612e29860ae64e5d86ee6cc6a..97a3715ac8bd83dadfb23555a398260de144ab9e 100644 (file)
@@ -119,24 +119,6 @@ EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
 #ifdef CONFIG_PCI
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len)
-               return NULL;
-       if (max && len > max)
-               len = max;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM)
-               return ioremap(start, len);
-       /* What? */
-       return NULL;
-}
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
        if (isa_vaddr_is_ioport(addr))
@@ -146,6 +128,5 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
        iounmap(addr);
 }
 
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
 #endif /* CONFIG_PCI */
index a459479995c6619fd83d6731ea7baaa1c016e990..e41ac6f7dcf15105aa4b784fce7d43d1599c4d64 100644 (file)
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
-               memslot = &kvm->memslots->memslots[log->slot];
+               memslot = id_to_memslot(kvm->memslots, log->slot);
 
                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);
index 286f13d601cf589f1b3ca9e9259c4fd22bb78fc4..a795a13f4a70f9ddc212e8d125857ad50232248a 100644 (file)
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
  * to allocate contiguous physical memory for the real memory
  * areas for guests.
  */
-void kvm_rma_init(void)
+void __init kvm_rma_init(void)
 {
        unsigned long i;
        unsigned long j, npages;
index 31e1adeaa92a371b334d1c3329f6550d5f3edaec..0cfb46d54b8c922db3b4136b1f7eb021213c48e7 100644 (file)
@@ -175,9 +175,6 @@ config PPC_INDIRECT_MMIO
 config PPC_IO_WORKAROUNDS
        bool
 
-config GENERIC_IOMAP
-       bool
-
 source "drivers/cpufreq/Kconfig"
 
 menu "CPU Frequency drivers"
index 8b0c9464aa9d10ece5562bad884969665ee6f348..3df65d39abc15f361df4cc61eace374af74e4c17 100644 (file)
@@ -4,6 +4,7 @@ config SCORE
        def_bool y
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
+       select GENERIC_IOMAP
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select ARCH_DISCARD_MEMBLOCK
@@ -36,9 +37,6 @@ endmenu
 config CPU_SCORE7
        bool
 
-config GENERIC_IOMAP
-       def_bool y
-
 config NO_DMA
        bool
        default y
index 47a2f1c2cb0d4d63bf914d1e3f644111fe628276..3c8db65c89e5583ef16907f9d44eb0a9a74461ff 100644 (file)
@@ -85,9 +85,6 @@ config GENERIC_GPIO
 config GENERIC_CALIBRATE_DELAY
        bool
 
-config GENERIC_IOMAP
-       bool
-
 config GENERIC_CLOCKEVENTS
        def_bool y
 
@@ -861,6 +858,7 @@ config PCI
        bool "PCI support"
        depends on SYS_SUPPORTS_PCI
        select PCI_DOMAINS
+       select GENERIC_PCI_IOMAP
        help
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
index c2691afe8f79c9e08e402dae2f31a5231e34c6f5..11aaf2fdec84348513565122448c13aa2b2b59fc 100644 (file)
@@ -393,29 +393,6 @@ static void __iomem *ioport_map_pci(struct pci_dev *dev,
        return (void __iomem *)(chan->io_map_base + port);
 }
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (unlikely(!len || !start))
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-
-       if (flags & IORESOURCE_IO)
-               return ioport_map_pci(dev, start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
        iounmap(addr);
index 868ea08dff0ba55e29dd8a79e7f34b74b820fcb3..96657992a72eef8f69580cc9cdb12e2c184639ec 100644 (file)
@@ -28,6 +28,7 @@ config SPARC
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
        select USE_GENERIC_SMP_HELPERS if SMP
+       select GENERIC_PCI_IOMAP
 
 config SPARC32
        def_bool !64BIT
index c2ced21c9dc19a7c284960d6a180f30345e5120f..2006e5d359dfb5f602ab004208ea60d610e15a6d 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <asm/page.h>      /* IO address mapping routines need this */
 #include <asm/system.h>
+#include <asm-generic/pci_iomap.h>
 
 #define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 
@@ -324,7 +325,6 @@ extern void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 
 /*
index 9c8965415f0a5187cd550435341029f553bdbfe2..9481e5a6fa90e218ffa2124a0feef91ffd228b4a 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/page.h>      /* IO address mapping routines need this */
 #include <asm/system.h>
 #include <asm/asi.h>
+#include <asm-generic/pci_iomap.h>
 
 /* PC crapola... */
 #define __SLOW_DOWN_IO do { } while (0)
@@ -514,7 +515,6 @@ extern void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 
 static inline int sbus_can_dma_64bit(void)
index e49b828a2471d57cca8e5aa723acc3a011a9d390..aa42fe30d5b96f0c3f1029274932d7bbbbd3da4d 100644 (file)
@@ -143,10 +143,11 @@ struct sigstack {
 #define SA_ONSTACK     _SV_SSTACK
 #define SA_RESTART     _SV_INTR
 #define SA_ONESHOT     _SV_RESET
-#define SA_NOMASK      0x20u
+#define SA_NODEFER     0x20u
 #define SA_NOCLDWAIT    0x100u
 #define SA_SIGINFO      0x200u
 
+#define SA_NOMASK      SA_NODEFER
 
 #define SIG_BLOCK          0x01        /* for blocking signals */
 #define SIG_UNBLOCK        0x02        /* for unblocking signals */
index 9ef37e13a920e28ba99e197dc50dff7b812c5972..c4d42a50ebc06d3f92fdae70aa7d1f1d64a5b6f7 100644 (file)
@@ -18,31 +18,8 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       /* What? */
-       return NULL;
-}
-
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        /* nothing to do */
 }
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
index 70a0de46cd1be8e4bc4c193ef3b4d213f89a368f..11270ca22c0a7b880718cd832a3e890f0b69c28f 100644 (file)
@@ -321,6 +321,7 @@ config PCI
        bool "PCI support"
        default y
        select PCI_DOMAINS
+       select GENERIC_PCI_IOMAP
        ---help---
          Enable PCI root complex support, so PCIe endpoint devices can
          be attached to the Tile chip.  Many, but not all, PCI devices
index c9ea1652af03c609f355ec12c456a45e0dd826f3..d2152deb1f3cf6a3bc352d92d445df4157a0924e 100644 (file)
@@ -204,7 +204,8 @@ static inline long ioport_panic(void)
 
 static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
 {
-       return (void __iomem *) ioport_panic();
+       pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
+       return NULL;
 }
 
 static inline void ioport_unmap(void __iomem *addr)
index 7f03cefed1b92079687a2ca838cd9d700a600a58..1d25fea42e547a554344aa47e25abb107c646aa5 100644 (file)
@@ -16,6 +16,7 @@
 #define _ASM_TILE_PCI_H
 
 #include <linux/pci.h>
+#include <asm-generic/pci_iomap.h>
 
 /*
  * Structure of a PCI controller (host bridge)
@@ -49,7 +50,6 @@ struct pci_controller {
 int __devinit tile_pci_init(void);
 int __devinit pcibios_init(void);
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
 
 void __devinit pcibios_fixup_bus(struct pci_bus *bus);
index 9d610d3fb11e9ac93c6e17875cff26257fa89e46..25567934a21e90cc5546a7495d784daa2e4e8cfa 100644 (file)
@@ -466,27 +466,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
        return 0;
 }
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len)
-               return NULL;
-       if (max && len > max)
-               len = max;
-
-       if (!(flags & IORESOURCE_MEM)) {
-               pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
-               start = 0;
-       }
-
-       return (void __iomem *)start;
-}
-EXPORT_SYMBOL(pci_iomap);
-
-
 /****************************************************************
  *
  * Tile PCI config space read/write routines
index 942ed6174f1d073e565d52e5dc074c7e84d3b533..eeb8054c7cd84347d242bba052c44cd64a5da5af 100644 (file)
@@ -12,6 +12,7 @@ config UNICORE32
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_FRAME_POINTERS
+       select GENERIC_IOMAP
        help
          UniCore-32 is 32-bit Instruction Set Architecture,
          including a series of low-power-consumption RISC chip
@@ -30,9 +31,6 @@ config GENERIC_CLOCKEVENTS
 config GENERIC_CSUM
        def_bool y
 
-config GENERIC_IOMAP
-       def_bool y
-
 config NO_IOPORT
        bool
 
index 1a5c5a5eb39c8f0f27fa37c4fa526a16c5d9a6f2..adddf6d640771ac88ab0c5b016ad96769cca6a9e 100644 (file)
@@ -37,15 +37,9 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
  */
 #define ioremap(cookie, size)          __uc32_ioremap(cookie, size)
 #define ioremap_cached(cookie, size)   __uc32_ioremap_cached(cookie, size)
+#define ioremap_nocache(cookie, size)  __uc32_ioremap(cookie, size)
 #define iounmap(cookie)                        __uc32_iounmap(cookie)
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#undef xlate_dev_mem_ptr
-#define xlate_dev_mem_ptr(p)   __va(p)
-
 #define HAVE_ARCH_PIO_SIZE
 #define PIO_OFFSET             (unsigned int)(PCI_IOBASE)
 #define PIO_MASK               (unsigned int)(IO_SPACE_LIMIT)
index 37b12a06b499ec0f1c4220aab59fe1473a0026d2..181108b8ecce766d0102b9e12ac070e6510337b0 100644 (file)
@@ -123,7 +123,7 @@ int __init mach_nb0916_init(void)
 
        if (request_irq(gpio_to_irq(GPI_LCD_CASE_OFF),
                &nb0916_lcdcaseoff_handler,
-               IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                "NB0916 lcd case off", NULL) < 0) {
 
                printk(KERN_DEBUG "LCD-Case-OFF IRQ %d not available\n",
@@ -131,7 +131,7 @@ int __init mach_nb0916_init(void)
        }
 
        if (request_irq(gpio_to_irq(GPI_OTP_INT), &nb0916_overheat_handler,
-               IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                "NB0916 overheating protection", NULL) < 0) {
 
                printk(KERN_DEBUG "Overheating Protection IRQ %d not available\n",
index 673d7a89d8fff0fd86573ac9dfede09d456c309c..87adbf5ebfe07cf7547bf3ff553ccf0766665519 100644 (file)
@@ -65,7 +65,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
  */
 static struct resource mem_res[] = {
        {
-               .name = "Kernel text",
+               .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
index b163fca56789b2c5e55e86ce00f756f926ce737f..911b549a6df54d007954f287772c529d82e2759d 100644 (file)
@@ -63,10 +63,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0) {
                sigdelsetmask(&set, ~_BLOCKABLE);
-               spin_lock_irq(&current->sighand->siglock);
-               current->blocked = set;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
+               set_current_blocked(&set);
        }
 
        err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
@@ -321,6 +318,7 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
+       sigset_t blocked;
        int usig = sig;
        int ret;
 
@@ -372,13 +370,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
        /*
         * Block the signal if we were successful.
         */
-       spin_lock_irq(&tsk->sighand->siglock);
-       sigorsets(&tsk->blocked, &tsk->blocked,
-                 &ka->sa.sa_mask);
+       sigorsets(&blocked, &tsk->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
-               sigaddset(&tsk->blocked, sig);
-       recalc_sigpending();
-       spin_unlock_irq(&tsk->sighand->siglock);
+               sigaddset(&blocked, sig);
+       set_current_blocked(&blocked);
 
        return 0;
 }
index 080710c092416f2ed6d259e0ef6e3c409149147c..d3824b2ff644b3975588b2220015a8bc17819dac 100644 (file)
@@ -86,7 +86,7 @@ static struct clocksource cksrc_puv3_oscr = {
 
 static struct irqaction puv3_timer_irq = {
        .name           = "ost0",
-       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = puv3_ost0_interrupt,
        .dev_id         = &ckevt_puv3_osmr0,
 };
index 1d2a69dd36d89c32e16cbe19d6062d5da95f4d52..1a31254ceb839348de42bd0473e20c2da21e3eb0 100644 (file)
@@ -62,6 +62,7 @@ config X86
        select ANON_INODES
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
+       select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_TEXT_POKE_SMP
        select HAVE_GENERIC_HARDIRQS
@@ -77,6 +78,7 @@ config X86
        select HAVE_BPF_JIT if (X86_64 && NET)
        select CLKEVT_I8253
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
+       select GENERIC_IOMAP
 
 config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS)
@@ -142,9 +144,6 @@ config NEED_SG_DMA_LENGTH
 config GENERIC_ISA_DMA
        def_bool ISA_DMA_API
 
-config GENERIC_IOMAP
-       def_bool y
-
 config GENERIC_BUG
        def_bool y
        depends on BUG
index f3444f700f3619eedcc8f3fa050586148e80ad70..17c5d4bdee5ed5124c7be91bf8d4ef73b39b7c2f 100644 (file)
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE   (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1       (9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_AVX2       (9*32+ 5) /* AVX2 instructions */
 #define X86_FEATURE_SMEP       (9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2       (9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS       (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
index 345c99cef15262dda6415b5eff3d2140c37bafcc..dffc38ee6255f95944eaf3aa0eb283d4f3ba808a 100644 (file)
@@ -5,6 +5,7 @@ extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
+extern int iommu_group_mf;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
index a026507893e9b566f0578a3955472c4b1f524559..ab4092e3214ecea7d02e3827fe596ed662243214 100644 (file)
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+       int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
        int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
 #endif
 
 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
index b4973f4dab9832da8569ef17d264406ac5c668d7..52d6640a5ca1eec731418d408fc6e03c161ce3fe 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -31,6 +33,8 @@
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
 #define KVM_MMIO_SIZE 16
 
 #define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
         * One bit set per slot which has memory
         * in this shadow page.
         */
-       DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
        int clear_spte_count;
 #endif
 
-       struct rcu_head rcu;
-};
+       int write_flooding_count;
 
-struct kvm_pv_mmu_op_buffer {
-       void *ptr;
-       unsigned len;
-       unsigned processed;
-       char buf[512] __aligned(sizeof(long));
+       struct rcu_head rcu;
 };
 
 struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
        u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+       KVM_PMC_GP = 0,
+       KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+       enum pmc_type type;
+       u8 idx;
+       u64 counter;
+       u64 eventsel;
+       struct perf_event *perf_event;
+       struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+       unsigned nr_arch_gp_counters;
+       unsigned nr_arch_fixed_counters;
+       unsigned available_event_types;
+       u64 fixed_ctr_ctrl;
+       u64 global_ctrl;
+       u64 global_status;
+       u64 global_ovf_ctrl;
+       u64 counter_bitmask[2];
+       u64 global_ctrl_mask;
+       u8 version;
+       struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+       struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+       struct irq_work irq_work;
+       u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
         */
        struct kvm_mmu *walk_mmu;
 
-       /* only needed in kvm_pv_mmu_op() path, but it's hot so
-        * put it here to avoid allocation */
-       struct kvm_pv_mmu_op_buffer mmu_op_buffer;
-
        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-       gfn_t last_pt_write_gfn;
-       int   last_pt_write_count;
-       u64  *last_pte_updated;
-       gfn_t last_pte_gfn;
-
        struct fpu guest_fpu;
        u64 xcr0;
 
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
        unsigned access;
        gfn_t mmio_gfn;
 
+       struct kvm_pmu pmu;
+
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
 
        cpumask_var_t wbinvd_dirty_mask;
 
+       unsigned long last_retry_eip;
+       unsigned long last_retry_addr;
+
        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
-       atomic_t invlpg_counter;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+                              struct kvm_memory_slot *slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-                 gpa_t addr, unsigned long *ret);
 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
 #define EMULTYPE_SKIP              (1 << 2)
+#define EMULTYPE_RETRY             (1 << 3)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);
 
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes,
-                      bool guest_initiated);
+                      const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+       return gpa;
+}
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
index a9c2116001d692f08b4d756cc90e7e5cd97ff3e5..f0c6fd6f176b00c9c3fdd972104c1961319f5eb9 100644 (file)
@@ -39,8 +39,6 @@
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
 
-#define MMU_QUEUE_SIZE 1024
-
 static int kvmapf = 1;
 
 static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-struct kvm_para_state {
-       u8 mmu_queue[MMU_QUEUE_SIZE];
-       int mmu_queue_len;
-};
-
-static DEFINE_PER_CPU(struct kvm_para_state, para_state);
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
 
-static struct kvm_para_state *kvm_para_state(void)
-{
-       return &per_cpu(para_state, raw_smp_processor_id());
-}
-
 /*
  * No need for any "IO delay" on KVM
  */
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
        }
 }
 
-static void kvm_mmu_op(void *buffer, unsigned len)
-{
-       int r;
-       unsigned long a1, a2;
-
-       do {
-               a1 = __pa(buffer);
-               a2 = 0;   /* on i386 __pa() always returns <4G */
-               r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
-               buffer += r;
-               len -= r;
-       } while (len);
-}
-
-static void mmu_queue_flush(struct kvm_para_state *state)
-{
-       if (state->mmu_queue_len) {
-               kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
-               state->mmu_queue_len = 0;
-       }
-}
-
-static void kvm_deferred_mmu_op(void *buffer, int len)
-{
-       struct kvm_para_state *state = kvm_para_state();
-
-       if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
-               kvm_mmu_op(buffer, len);
-               return;
-       }
-       if (state->mmu_queue_len + len > sizeof state->mmu_queue)
-               mmu_queue_flush(state);
-       memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
-       state->mmu_queue_len += len;
-}
-
-static void kvm_mmu_write(void *dest, u64 val)
-{
-       __u64 pte_phys;
-       struct kvm_mmu_op_write_pte wpte;
-
-#ifdef CONFIG_HIGHPTE
-       struct page *page;
-       unsigned long dst = (unsigned long) dest;
-
-       page = kmap_atomic_to_page(dest);
-       pte_phys = page_to_pfn(page);
-       pte_phys <<= PAGE_SHIFT;
-       pte_phys += (dst & ~(PAGE_MASK));
-#else
-       pte_phys = (unsigned long)__pa(dest);
-#endif
-       wpte.header.op = KVM_MMU_OP_WRITE_PTE;
-       wpte.pte_val = val;
-       wpte.pte_phys = pte_phys;
-
-       kvm_deferred_mmu_op(&wpte, sizeof wpte);
-}
-
-/*
- * We only need to hook operations that are MMU writes.  We hook these so that
- * we can use lazy MMU mode to batch these operations.  We could probably
- * improve the performance of the host code if we used some of the information
- * here to simplify processing of batched writes.
- */
-static void kvm_set_pte(pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                          pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-       kvm_mmu_write(pmdp, pmd_val(pmd));
-}
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-       kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_pte_clear(struct mm_struct *mm,
-                         unsigned long addr, pte_t *ptep)
-{
-       kvm_mmu_write(ptep, 0);
-}
-
-static void kvm_pmd_clear(pmd_t *pmdp)
-{
-       kvm_mmu_write(pmdp, 0);
-}
-#endif
-
-static void kvm_set_pud(pud_t *pudp, pud_t pud)
-{
-       kvm_mmu_write(pudp, pud_val(pud));
-}
-
-#if PAGETABLE_LEVELS == 4
-static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
-{
-       kvm_mmu_write(pgdp, pgd_val(pgd));
-}
-#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static void kvm_flush_tlb(void)
-{
-       struct kvm_mmu_op_flush_tlb ftlb = {
-               .header.op = KVM_MMU_OP_FLUSH_TLB,
-       };
-
-       kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
-}
-
-static void kvm_release_pt(unsigned long pfn)
-{
-       struct kvm_mmu_op_release_pt rpt = {
-               .header.op = KVM_MMU_OP_RELEASE_PT,
-               .pt_phys = (u64)pfn << PAGE_SHIFT,
-       };
-
-       kvm_mmu_op(&rpt, sizeof rpt);
-}
-
-static void kvm_enter_lazy_mmu(void)
-{
-       paravirt_enter_lazy_mmu();
-}
-
-static void kvm_leave_lazy_mmu(void)
-{
-       struct kvm_para_state *state = kvm_para_state();
-
-       mmu_queue_flush(state);
-       paravirt_leave_lazy_mmu();
-}
-
 static void __init paravirt_ops_setup(void)
 {
        pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;
 
-       if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
-               pv_mmu_ops.set_pte = kvm_set_pte;
-               pv_mmu_ops.set_pte_at = kvm_set_pte_at;
-               pv_mmu_ops.set_pmd = kvm_set_pmd;
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-               pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
-               pv_mmu_ops.pte_clear = kvm_pte_clear;
-               pv_mmu_ops.pmd_clear = kvm_pmd_clear;
-#endif
-               pv_mmu_ops.set_pud = kvm_set_pud;
-#if PAGETABLE_LEVELS == 4
-               pv_mmu_ops.set_pgd = kvm_set_pgd;
-#endif
-#endif
-               pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
-               pv_mmu_ops.release_pte = kvm_release_pt;
-               pv_mmu_ops.release_pmd = kvm_release_pt;
-               pv_mmu_ops.release_pud = kvm_release_pt;
-
-               pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
-               pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
-       }
 #ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
 #endif
index 80dc793b3f6331747cd29ea5d0a1303e515f07db..1c4d769e21ea07053f81f75b7aed2b21ab1f397f 100644 (file)
@@ -45,6 +45,15 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
+/*
+ * Group multi-function PCI devices into a single device-group for the
+ * iommu_device_group interface.  This tells the iommu driver to pretend
+ * it cannot distinguish between functions of a device, exposing only one
+ * group for the device.  Useful for disallowing use of individual PCI
+ * functions from userspace drivers.
+ */
+int iommu_group_mf __read_mostly;
+
 extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
 
 /* Dummy device used for NULL arguments (normally ISA). */
@@ -169,6 +178,8 @@ static __init int iommu_setup(char *p)
 #endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;
+               if (!strncmp(p, "group_mf", 8))
+                       iommu_group_mf = 1;
 
                gart_parse_options(p);
 
index 54ddaeb221c18cbd1ed06aa9a66c74752e4717b1..46a01bdc27e2846582d88fca80356506349e1074 100644 (file)
@@ -682,7 +682,6 @@ static int
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
                struct pt_regs *regs)
 {
-       sigset_t blocked;
        int ret;
 
        /* Are we from a system call? */
@@ -733,10 +732,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
         */
        regs->flags &= ~X86_EFLAGS_TF;
 
-       sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
-       if (!(ka->sa.sa_flags & SA_NODEFER))
-               sigaddset(&blocked, sig);
-       set_current_blocked(&blocked);
+       block_sigmask(ka, sig);
 
        tracehook_signal_handler(sig, info, ka, regs,
                                 test_thread_flag(TIF_SINGLESTEP));
index ff5790d8e990f0c382fa43c5ff2d3b5d7ed46c48..1a7fe868f375cb6bccb281599359b8c2dbad8073 100644 (file)
@@ -35,6 +35,7 @@ config KVM
        select KVM_MMIO
        select TASKSTATS
        select TASK_DELAY_ACCT
+       select PERF_EVENTS
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
@@ -52,6 +53,8 @@ config KVM
 config KVM_INTEL
        tristate "KVM for Intel processors support"
        depends on KVM
+       # for perf_guest_get_msrs():
+       depends on CPU_SUP_INTEL
        ---help---
          Provides support for KVM on Intel processors equipped with the VT
          extensions.
index f15501f431c8d972a2eb3cc8dd6fb8c5870e248f..4f579e8dcacf6747a7e3a34db765bf112233680f 100644 (file)
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API)       += $(addprefix ../../../virt/kvm/, iommu.o)
 kvm-$(CONFIG_KVM_ASYNC_PF)     += $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-                          i8254.o timer.o
+                          i8254.o timer.o cpuid.o pmu.o
 kvm-intel-y            += vmx.o
 kvm-amd-y              += svm.o
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
new file mode 100644 (file)
index 0000000..89b02bf
--- /dev/null
@@ -0,0 +1,670 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ * cpuid support routines
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ * Copyright IBM Corporation, 2008
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <asm/user.h>
+#include <asm/xsave.h>
+#include "cpuid.h"
+#include "lapic.h"
+#include "mmu.h"
+#include "trace.h"
+
+/*
+ * Refresh state derived from the guest's CPUID table after the table or
+ * guest CR4 changes: mirror CR4.OSXSAVE into leaf 1, pick the LAPIC
+ * timer mode mask, and let the virtual PMU re-read its CPUID leaf.
+ */
+void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       if (!best)
+               return;
+
+       /* Update OSXSAVE bit: CPUID.1:ECX.OSXSAVE must track guest CR4 */
+       if (cpu_has_xsave && best->function == 0x1) {
+               best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+                       best->ecx |= bit(X86_FEATURE_OSXSAVE);
+       }
+
+       if (apic) {
+               /* TSC-deadline capable guests get both timer mode bits */
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
+
+       kvm_pmu_cpuid_update(vcpu);
+}
+
+/*
+ * Non-zero iff the host has EFER.NX set.  rdmsrl_safe() leaves
+ * efer == 0 if the MSR read faults, so a missing MSR reads as "no NX".
+ */
+static int is_efer_nx(void)
+{
+       unsigned long long efer = 0;
+
+       rdmsrl_safe(MSR_EFER, &efer);
+       return efer & EFER_NX;
+}
+
+/*
+ * If the guest's CPUID advertises NX (leaf 0x80000001, EDX bit 20) but
+ * the host lacks EFER.NX, strip the bit so the guest is not offered a
+ * capability the host cannot back.
+ */
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_cpuid_entry2 *e, *entry;
+
+       entry = NULL;
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               e = &vcpu->arch.cpuid_entries[i];
+               if (e->function == 0x80000001) {
+                       entry = e;
+                       break;
+               }
+       }
+       /* bit 20 of 0x80000001.EDX is the NX feature flag */
+       if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
+               entry->edx &= ~(1 << 20);
+               printk(KERN_INFO "kvm: guest NX capability removed\n");
+       }
+}
+
+/* when an old userspace process fills a new kernel module */
+/*
+ * Legacy KVM_SET_CPUID: copy an array of struct kvm_cpuid_entry (which
+ * has no index/flags fields) from userspace and widen each element into
+ * the vcpu's kvm_cpuid_entry2 table, zeroing the fields the old ABI
+ * lacks.  Returns 0 on success or a negative errno.
+ */
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+                            struct kvm_cpuid *cpuid,
+                            struct kvm_cpuid_entry __user *entries)
+{
+       int r, i;
+       struct kvm_cpuid_entry *cpuid_entries;
+
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+               goto out_free;
+       /* widen each legacy entry into a kvm_cpuid_entry2 */
+       for (i = 0; i < cpuid->nent; i++) {
+               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
+               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
+               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
+               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
+               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
+               vcpu->arch.cpuid_entries[i].index = 0;
+               vcpu->arch.cpuid_entries[i].flags = 0;
+               vcpu->arch.cpuid_entries[i].padding[0] = 0;
+               vcpu->arch.cpuid_entries[i].padding[1] = 0;
+               vcpu->arch.cpuid_entries[i].padding[2] = 0;
+       }
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       cpuid_fix_nx_cap(vcpu);
+       r = 0;
+       /* propagate the new table into dependent cached state */
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+
+out_free:
+       vfree(cpuid_entries);
+out:
+       return r;
+}
+
+/*
+ * KVM_SET_CPUID2: copy a kvm_cpuid_entry2 array straight from userspace
+ * into the vcpu's CPUID table, then refresh dependent cached state.
+ * Returns 0 on success or a negative errno.
+ */
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+{
+       int r;
+
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+       return 0;
+
+out:
+       return r;
+}
+
+/*
+ * KVM_GET_CPUID2: copy the vcpu's CPUID table out to userspace.  On
+ * -E2BIG (caller's buffer too small) cpuid->nent is updated to tell
+ * userspace the required size.
+ */
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+{
+       int r;
+
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+       r = -EFAULT;
+       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       return 0;
+
+out:
+       cpuid->nent = vcpu->arch.cpuid_nent;
+       return r;
+}
+
+/* Mask a CPUID feature word against the host's capability word. */
+static void cpuid_mask(u32 *word, int wordnum)
+{
+       *word &= boot_cpu_data.x86_capability[wordnum];
+}
+
+/* Fill one entry by executing CPUID on the host for (function, index). */
+static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                          u32 index)
+{
+       entry->function = function;
+       entry->index = index;
+       cpuid_count(entry->function, entry->index,
+                   &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+       entry->flags = 0;
+}
+
+/*
+ * True if XCR0 state-component 'bit' is one KVM exposes (FP/SSE/YMM)
+ * AND the host's XCR0 actually supports it.
+ */
+static bool supported_xcr0_bit(unsigned bit)
+{
+       u64 mask = ((u64)1 << bit);
+
+       return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+}
+
+#define F(x) bit(X86_FEATURE_##x)
+
+/*
+ * Build the KVM-supported view of CPUID leaf 'function' (and, for
+ * multi-entry leaves, its sub-leaves) into entry[], advancing *nent.
+ * Each leaf is read from the host with cpuid_count() and then masked
+ * down to the feature set KVM can emulate.  Returns 0 on success or
+ * -E2BIG when entry[] (capacity maxnent) is full.
+ */
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                        u32 index, int *nent, int maxnent)
+{
+       int r;
+       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
+#ifdef CONFIG_X86_64
+       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+                               ? F(GBPAGES) : 0;
+       unsigned f_lm = F(LM);
+#else
+       unsigned f_gbpages = 0;
+       unsigned f_lm = 0;
+#endif
+       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+
+       /* cpuid 1.edx */
+       const u32 kvm_supported_word0_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+               0 /* Reserved, DS, ACPI */ | F(MMX) |
+               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+               0 /* HTT, TM, Reserved, PBE */;
+       /* cpuid 0x80000001.edx */
+       const u32 kvm_supported_word1_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* Reserved */ |
+               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
+               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
+       /* cpuid 1.ecx */
+       const u32 kvm_supported_word4_x86_features =
+               F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
+               0 /* DS-CPL, VMX, SMX, EST */ |
+               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+               F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
+               0 /* Reserved, DCA */ | F(XMM4_1) |
+               F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
+               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C) | F(RDRAND);
+       /* cpuid 0x80000001.ecx */
+       const u32 kvm_supported_word6_x86_features =
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+
+       /* cpuid 0xC0000001.edx */
+       const u32 kvm_supported_word5_x86_features =
+               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+               F(PMM) | F(PMM_EN);
+
+       /* cpuid 7.0.ebx */
+       const u32 kvm_supported_word9_x86_features =
+               F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+
+       /* all calls to cpuid_count() should be made on the same cpu */
+       get_cpu();
+
+       r = -E2BIG;
+
+       if (*nent >= maxnent)
+               goto out;
+
+       do_cpuid_1_ent(entry, function, index);
+       ++*nent;
+
+       switch (function) {
+       case 0:
+               /* cap the advertised max basic leaf at 0xd (XSAVE leaf) */
+               entry->eax = min(entry->eax, (u32)0xd);
+               break;
+       case 1:
+               entry->edx &= kvm_supported_word0_x86_features;
+               cpuid_mask(&entry->edx, 0);
+               entry->ecx &= kvm_supported_word4_x86_features;
+               cpuid_mask(&entry->ecx, 4);
+               /* we support x2apic emulation even if host does not support
+                * it since we emulate x2apic in software */
+               entry->ecx |= F(X2APIC);
+               break;
+       /* function 2 entries are STATEFUL. That is, repeated cpuid commands
+        * may return different values. This forces us to get_cpu() before
+        * issuing the first command, and also to emulate this annoying behavior
+        * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
+       case 2: {
+               /* AL of leaf 2 gives the number of times to execute CPUID.2 */
+               int t, times = entry->eax & 0xff;
+
+               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+               entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+               for (t = 1; t < times; ++t) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       do_cpuid_1_ent(&entry[t], function, 0);
+                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+                       ++*nent;
+               }
+               break;
+       }
+       /* function 4 has additional index. */
+       case 4: {
+               int i, cache_type;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until cache_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       cache_type = entry[i - 1].eax & 0x1f;
+                       if (!cache_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 7: {
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* Mask ebx against host capability word 9 */
+               if (index == 0) {
+                       entry->ebx &= kvm_supported_word9_x86_features;
+                       cpuid_mask(&entry->ebx, 9);
+               } else
+                       entry->ebx = 0;
+               entry->eax = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       }
+       case 9:
+               break;
+       case 0xa: { /* Architectural Performance Monitoring */
+               struct x86_pmu_capability cap;
+               union cpuid10_eax eax;
+               union cpuid10_edx edx;
+
+               perf_get_x86_pmu_capability(&cap);
+
+               /*
+                * Only support guest architectural pmu on a host
+                * with architectural pmu.
+                */
+               if (!cap.version)
+                       memset(&cap, 0, sizeof(cap));
+
+               /* advertise at most architectural PMU version 2 */
+               eax.split.version_id = min(cap.version, 2);
+               eax.split.num_counters = cap.num_counters_gp;
+               eax.split.bit_width = cap.bit_width_gp;
+               eax.split.mask_length = cap.events_mask_len;
+
+               edx.split.num_counters_fixed = cap.num_counters_fixed;
+               edx.split.bit_width_fixed = cap.bit_width_fixed;
+               edx.split.reserved = 0;
+
+               entry->eax = eax.full;
+               entry->ebx = cap.events_mask;
+               entry->ecx = 0;
+               entry->edx = edx.full;
+               break;
+       }
+       /* function 0xb has additional index. */
+       case 0xb: {
+               int i, level_type;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until level_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       level_type = entry[i - 1].ecx & 0xff00;
+                       if (!level_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 0xd: {
+               int idx, i;
+
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               for (idx = 1, i = 1; idx < 64; ++idx) {
+                       if (*nent >= maxnent)
+                               goto out;
+
+                       /* entry[i] is reused (overwritten next iteration)
+                        * when this state component is absent/unsupported */
+                       do_cpuid_1_ent(&entry[i], function, idx);
+                       if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
+                               continue;
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+                       ++i;
+               }
+               break;
+       }
+       case KVM_CPUID_SIGNATURE: {
+               /* hypervisor signature returned in EBX:ECX:EDX */
+               char signature[12] = "KVMKVMKVM\0\0";
+               u32 *sigptr = (u32 *)signature;
+               entry->eax = 0;
+               entry->ebx = sigptr[0];
+               entry->ecx = sigptr[1];
+               entry->edx = sigptr[2];
+               break;
+       }
+       case KVM_CPUID_FEATURES:
+               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
+                            (1 << KVM_FEATURE_ASYNC_PF) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+               if (sched_info_on())
+                       entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
+               entry->ebx = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       case 0x80000000:
+               /* cap the advertised max extended leaf */
+               entry->eax = min(entry->eax, 0x8000001a);
+               break;
+       case 0x80000001:
+               entry->edx &= kvm_supported_word1_x86_features;
+               cpuid_mask(&entry->edx, 1);
+               entry->ecx &= kvm_supported_word6_x86_features;
+               cpuid_mask(&entry->ecx, 6);
+               break;
+       case 0x80000008: {
+               /* physical/virtual address-size leaf */
+               unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+               unsigned phys_as = entry->eax & 0xff;
+
+               if (!g_phys_as)
+                       g_phys_as = phys_as;
+               entry->eax = g_phys_as | (virt_as << 8);
+               entry->ebx = entry->edx = 0;
+               break;
+       }
+       case 0x80000019:
+               entry->ecx = entry->edx = 0;
+               break;
+       case 0x8000001a:
+               break;
+       case 0x8000001d:
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+       case 0xC0000000:
+               /*Just support up to 0xC0000004 now*/
+               entry->eax = min(entry->eax, 0xC0000004);
+               break;
+       case 0xC0000001:
+               entry->edx &= kvm_supported_word5_x86_features;
+               cpuid_mask(&entry->edx, 5);
+               break;
+       case 3: /* Processor serial number */
+       case 5: /* MONITOR/MWAIT */
+       case 6: /* Thermal management */
+       case 0x80000007: /* Advanced power management */
+       case 0xC0000002:
+       case 0xC0000003:
+       case 0xC0000004:
+       default:
+               /* leaves KVM does not expose report all-zero registers */
+               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       }
+
+       /* let the vendor module (VMX/SVM) adjust its own bits last */
+       kvm_x86_ops->set_supported_cpuid(function, entry);
+
+       r = 0;
+
+out:
+       put_cpu();
+
+       return r;
+}
+
+#undef F
+
+/*
+ * One top-level CPUID range to enumerate: its first leaf, optional
+ * sub-leaf index, whether EAX of the first leaf gives a leaf count,
+ * and an optional predicate gating the whole range.
+ */
+struct kvm_cpuid_param {
+       u32 func;
+       u32 idx;
+       bool has_leaf_count;
+       bool (*qualifier)(struct kvm_cpuid_param *param);
+};
+
+/* Qualifier: the Centaur 0xC0000000 range only applies on Centaur CPUs. */
+static bool is_centaur_cpu(struct kvm_cpuid_param *param)
+{
+       return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
+}
+
+/*
+ * KVM_GET_SUPPORTED_CPUID: enumerate every CPUID leaf KVM can expose
+ * (basic, extended, Centaur, and the KVM paravirt leaves) and copy the
+ * resulting table to userspace.  cpuid->nent is the caller's buffer
+ * capacity on entry and the number of entries written on success.
+ */
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                     struct kvm_cpuid_entry2 __user *entries)
+{
+       struct kvm_cpuid_entry2 *cpuid_entries;
+       int limit, nent = 0, r = -E2BIG, i;
+       u32 func;
+       static struct kvm_cpuid_param param[] = {
+               { .func = 0, .has_leaf_count = true },
+               { .func = 0x80000000, .has_leaf_count = true },
+               { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
+               { .func = KVM_CPUID_SIGNATURE },
+               { .func = KVM_CPUID_FEATURES },
+       };
+
+       if (cpuid->nent < 1)
+               goto out;
+       /* silently clamp oversized requests to the supported maximum */
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+
+       r = 0;
+       for (i = 0; i < ARRAY_SIZE(param); i++) {
+               struct kvm_cpuid_param *ent = &param[i];
+
+               if (ent->qualifier && !ent->qualifier(ent))
+                       continue;
+
+               r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
+                               &nent, cpuid->nent);
+
+               if (r)
+                       goto out_free;
+
+               if (!ent->has_leaf_count)
+                       continue;
+
+               /* EAX of the range's first leaf gives the highest leaf */
+               limit = cpuid_entries[nent - 1].eax;
+               for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
+                       r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
+                                    &nent, cpuid->nent);
+
+               if (r)
+                       goto out_free;
+       }
+
+       r = -EFAULT;
+       if (copy_to_user(entries, cpuid_entries,
+                        nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out_free;
+       cpuid->nent = nent;
+       r = 0;
+
+out_free:
+       vfree(cpuid_entries);
+out:
+       return r;
+}
+
+/*
+ * For stateful CPUID functions (leaf 2): clear READ_NEXT on the entry
+ * just consumed and set it on the next entry with the same function,
+ * wrapping around the table.  Returns the index of the entry selected.
+ */
+static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+{
+       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+       int j, nent = vcpu->arch.cpuid_nent;
+
+       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+       /* when no next entry is found, the current entry[i] is reselected */
+       for (j = i + 1; ; j = (j + 1) % nent) {
+               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+               if (ej->function == e->function) {
+                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+                       return j;
+               }
+       }
+       return 0; /* silence gcc, even though control never reaches here */
+}
+
+/* find an entry with matching function, matching index (if needed), and that
+ * should be read next (if it's stateful) */
+static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
+       u32 function, u32 index)
+{
+       if (e->function != function)
+               return 0;
+       /* index only matters for leaves flagged as index-significant */
+       if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
+               return 0;
+       /* stateful entries match only when it is their turn to be read */
+       if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
+           !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
+               return 0;
+       return 1;
+}
+
+/*
+ * Look up the guest CPUID entry for (function, index), honouring the
+ * stateful-function rotation for leaf 2.  Returns NULL if no entry
+ * matches.  Note: may mutate entry flags (READ_NEXT) as a side effect.
+ */
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function, u32 index)
+{
+       int i;
+       struct kvm_cpuid_entry2 *best = NULL;
+
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               struct kvm_cpuid_entry2 *e;
+
+               e = &vcpu->arch.cpuid_entries[i];
+               if (is_matching_cpuid_entry(e, function, index)) {
+                       if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
+                               move_to_next_stateful_cpuid_entry(vcpu, i);
+                       best = e;
+                       break;
+               }
+       }
+       return best;
+}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+
+/*
+ * Guest physical-address width in bits, from CPUID.0x80000008:EAX[7:0];
+ * falls back to 36 when the extended leaf is absent.
+ */
+int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+       if (!best || best->eax < 0x80000008)
+               goto not_found;
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       if (best)
+               return best->eax & 0xff;
+not_found:
+       return 36;
+}
+
+/*
+ * If no match is found, check whether we exceed the vCPU's limit
+ * and return the content of the highest valid _standard_ leaf instead.
+ * This is to satisfy the CPUID specification.
+ */
+static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+                                                  u32 function, u32 index)
+{
+       struct kvm_cpuid_entry2 *maxlevel;
+
+       /* look up the max-leaf entry for this range (basic or extended) */
+       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+       if (!maxlevel || maxlevel->eax >= function)
+               return NULL;
+       /* out-of-range extended leaves fall back to the basic range limit */
+       if (function & 0x80000000) {
+               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+               if (!maxlevel)
+                       return NULL;
+       }
+       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+}
+
+/*
+ * Emulate a guest CPUID instruction: read the leaf from RAX and the
+ * sub-leaf from RCX, look it up in the guest's CPUID table (with the
+ * out-of-range fallback above), write the result to RAX-RDX (zero if
+ * nothing matches), skip the instruction, and trace the result.
+ */
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+       u32 function, index;
+       struct kvm_cpuid_entry2 *best;
+
+       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       /* default all output registers to zero for unmatched leaves */
+       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
+       best = kvm_find_cpuid_entry(vcpu, function, index);
+
+       if (!best)
+               best = check_cpuid_limit(vcpu, function, index);
+
+       if (best) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
+       }
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       trace_kvm_cpuid(function,
+                       kvm_register_read(vcpu, VCPU_REGS_RAX),
+                       kvm_register_read(vcpu, VCPU_REGS_RBX),
+                       kvm_register_read(vcpu, VCPU_REGS_RCX),
+                       kvm_register_read(vcpu, VCPU_REGS_RDX));
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
new file mode 100644 (file)
index 0000000..5b97e17
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef ARCH_X86_KVM_CPUID_H
+#define ARCH_X86_KVM_CPUID_H
+
+#include "x86.h"
+
+void kvm_update_cpuid(struct kvm_vcpu *vcpu);
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function, u32 index);
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                     struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+                            struct kvm_cpuid *cpuid,
+                            struct kvm_cpuid_entry __user *entries);
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries);
+
+
+/* True if the guest's CPUID leaf 1 advertises XSAVE. */
+static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+/* True if the guest's CPUID leaf 7 advertises SMEP. */
+static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_SMEP));
+}
+
+/* True if the guest's CPUID leaf 7 advertises FSGSBASE. */
+static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
+}
+
+#endif
index f1e3be18a08ff9507ee98f1020896167efe36932..05a562b850252b3c480ce67dd4e1e4c6c47f6055 100644 (file)
 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 #define No64       (1<<28)
+#define PageTable   (1 << 29)   /* instruction used to write page table */
 /* Source 2 operand type */
-#define Src2Shift   (29)
+#define Src2Shift   (30)
 #define Src2None    (OpNone << Src2Shift)
 #define Src2CL      (OpCL << Src2Shift)
 #define Src2ImmByte (OpImmByte << Src2Shift)
@@ -1674,11 +1675,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
-static int em_grp1a(struct x86_emulate_ctxt *ctxt)
-{
-       return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
-}
-
 static int em_grp2(struct x86_emulate_ctxt *ctxt)
 {
        switch (ctxt->modrm_reg) {
@@ -1788,7 +1784,7 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
        return rc;
 }
 
-static int em_grp9(struct x86_emulate_ctxt *ctxt)
+static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 {
        u64 old = ctxt->dst.orig_val64;
 
@@ -1831,6 +1827,24 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
        return rc;
 }
 
+static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
+{
+       /* Save real source value, then compare EAX against destination. */
+       ctxt->src.orig_val = ctxt->src.val;
+       ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+       emulate_2op_SrcV(ctxt, "cmp");
+
+       if (ctxt->eflags & EFLG_ZF) {
+               /* Success: write back to memory. */
+               ctxt->dst.val = ctxt->src.orig_val;
+       } else {
+               /* Failure: write the value we saw to EAX. */
+               ctxt->dst.type = OP_REG;
+               ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
+       }
+       return X86EMUL_CONTINUE;
+}
+
 static int em_lseg(struct x86_emulate_ctxt *ctxt)
 {
        int seg = ctxt->src2.val;
@@ -2481,6 +2495,15 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_call(struct x86_emulate_ctxt *ctxt)
+{
+       long rel = ctxt->src.val;
+
+       ctxt->src.val = (unsigned long)ctxt->_eip;
+       jmp_rel(ctxt, rel);
+       return em_push(ctxt);
+}
+
 static int em_call_far(struct x86_emulate_ctxt *ctxt)
 {
        u16 sel, old_cs;
@@ -2622,12 +2645,75 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
+{
+       u64 pmc;
+
+       if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
+               return emulate_gp(ctxt, 0);
+       ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
+       ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_mov(struct x86_emulate_ctxt *ctxt)
 {
        ctxt->dst.val = ctxt->src.val;
        return X86EMUL_CONTINUE;
 }
 
+static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+{
+       if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+               return emulate_gp(ctxt, 0);
+
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
+static int em_dr_write(struct x86_emulate_ctxt *ctxt)
+{
+       unsigned long val;
+
+       if (ctxt->mode == X86EMUL_MODE_PROT64)
+               val = ctxt->src.val & ~0ULL;
+       else
+               val = ctxt->src.val & ~0U;
+
+       /* #UD condition is already handled. */
+       if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
+               return emulate_gp(ctxt, 0);
+
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
+static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
+{
+       u64 msr_data;
+
+       msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
+               | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
+       if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
+               return emulate_gp(ctxt, 0);
+
+       return X86EMUL_CONTINUE;
+}
+
+static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
+{
+       u64 msr_data;
+
+       if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
+               return emulate_gp(ctxt, 0);
+
+       ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
+       ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
 {
        if (ctxt->modrm_reg > VCPU_SREG_GS)
@@ -2775,6 +2861,24 @@ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_in(struct x86_emulate_ctxt *ctxt)
+{
+       if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
+                            &ctxt->dst.val))
+               return X86EMUL_IO_NEEDED;
+
+       return X86EMUL_CONTINUE;
+}
+
+static int em_out(struct x86_emulate_ctxt *ctxt)
+{
+       ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
+                                   &ctxt->src.val, 1);
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       return X86EMUL_CONTINUE;
+}
+
 static int em_cli(struct x86_emulate_ctxt *ctxt)
 {
        if (emulator_bad_iopl(ctxt))
@@ -2794,6 +2898,69 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_bt(struct x86_emulate_ctxt *ctxt)
+{
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
+       /* only subword offset */
+       ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
+
+       emulate_2op_SrcV_nobyte(ctxt, "bt");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bts(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "bts");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_btr(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "btr");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_btc(struct x86_emulate_ctxt *ctxt)
+{
+       emulate_2op_SrcV_nobyte(ctxt, "btc");
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bsf(struct x86_emulate_ctxt *ctxt)
+{
+       u8 zf;
+
+       __asm__ ("bsf %2, %0; setz %1"
+                : "=r"(ctxt->dst.val), "=q"(zf)
+                : "r"(ctxt->src.val));
+
+       ctxt->eflags &= ~X86_EFLAGS_ZF;
+       if (zf) {
+               ctxt->eflags |= X86_EFLAGS_ZF;
+               /* Disable writeback. */
+               ctxt->dst.type = OP_NONE;
+       }
+       return X86EMUL_CONTINUE;
+}
+
+static int em_bsr(struct x86_emulate_ctxt *ctxt)
+{
+       u8 zf;
+
+       __asm__ ("bsr %2, %0; setz %1"
+                : "=r"(ctxt->dst.val), "=q"(zf)
+                : "r"(ctxt->src.val));
+
+       ctxt->eflags &= ~X86_EFLAGS_ZF;
+       if (zf) {
+               ctxt->eflags |= X86_EFLAGS_ZF;
+               /* Disable writeback. */
+               ctxt->dst.type = OP_NONE;
+       }
+       return X86EMUL_CONTINUE;
+}
+
 static bool valid_cr(int nr)
 {
        switch (nr) {
@@ -2867,9 +3034,6 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
                break;
                }
        case 4: {
-               u64 cr4;
-
-               cr4 = ctxt->ops->get_cr(ctxt, 4);
                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
                if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
@@ -3003,6 +3167,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
+#define I2bvIP(_f, _e, _i, _p) \
+       IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
 
 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),                \
                I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),     \
@@ -3033,17 +3199,17 @@ static struct opcode group7_rm7[] = {
 
 static struct opcode group1[] = {
        I(Lock, em_add),
-       I(Lock, em_or),
+       I(Lock | PageTable, em_or),
        I(Lock, em_adc),
        I(Lock, em_sbb),
-       I(Lock, em_and),
+       I(Lock | PageTable, em_and),
        I(Lock, em_sub),
        I(Lock, em_xor),
        I(0, em_cmp),
 };
 
 static struct opcode group1A[] = {
-       D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
+       I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
 };
 
 static struct opcode group3[] = {
@@ -3058,16 +3224,19 @@ static struct opcode group3[] = {
 };
 
 static struct opcode group4[] = {
-       D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
+       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
        N, N, N, N, N, N,
 };
 
 static struct opcode group5[] = {
-       D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
-       D(SrcMem | ModRM | Stack),
+       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(SrcMem | ModRM | Stack, em_grp45),
        I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
-       D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
-       D(SrcMem | ModRM | Stack), N,
+       I(SrcMem | ModRM | Stack, em_grp45),
+       I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
+       I(SrcMem | ModRM | Stack, em_grp45), N,
 };
 
 static struct opcode group6[] = {
@@ -3096,18 +3265,21 @@ static struct group_dual group7 = { {
 
 static struct opcode group8[] = {
        N, N, N, N,
-       D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
-       D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
+       I(DstMem | SrcImmByte | ModRM, em_bt),
+       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
+       I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
+       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
 };
 
 static struct group_dual group9 = { {
-       N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
+       N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
 }, {
        N, N, N, N, N, N, N, N,
 } };
 
 static struct opcode group11[] = {
-       I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
+       I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+       X7(D(Undefined)),
 };
 
 static struct gprefix pfx_0f_6f_0f_7f = {
@@ -3120,7 +3292,7 @@ static struct opcode opcode_table[256] = {
        I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
        /* 0x08 - 0x0F */
-       I6ALU(Lock, em_or),
+       I6ALU(Lock | PageTable, em_or),
        I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
        N,
        /* 0x10 - 0x17 */
@@ -3132,7 +3304,7 @@ static struct opcode opcode_table[256] = {
        I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
        I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
        /* 0x20 - 0x27 */
-       I6ALU(Lock, em_and), N, N,
+       I6ALU(Lock | PageTable, em_and), N, N,
        /* 0x28 - 0x2F */
        I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
        /* 0x30 - 0x37 */
@@ -3155,8 +3327,8 @@ static struct opcode opcode_table[256] = {
        I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
        I(SrcImmByte | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-       D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-       D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
+       I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
+       I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte)),
        /* 0x80 - 0x87 */
@@ -3165,11 +3337,11 @@ static struct opcode opcode_table[256] = {
        G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
        G(DstMem | SrcImmByte | ModRM | Group, group1),
        I2bv(DstMem | SrcReg | ModRM, em_test),
-       I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
+       I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
        /* 0x88 - 0x8F */
-       I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
+       I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
        I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
-       I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+       I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
        D(ModRM | SrcMem | NoAccess | DstReg),
        I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
        G(0, group1A),
@@ -3182,7 +3354,7 @@ static struct opcode opcode_table[256] = {
        II(ImplicitOps | Stack, em_popf, popf), N, N,
        /* 0xA0 - 0xA7 */
        I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
-       I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
+       I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
        I2bv(SrcSI | DstDI | Mov | String, em_mov),
        I2bv(SrcSI | DstDI | String, em_cmp),
        /* 0xA8 - 0xAF */
@@ -3213,13 +3385,13 @@ static struct opcode opcode_table[256] = {
        /* 0xE0 - 0xE7 */
        X3(I(SrcImmByte, em_loop)),
        I(SrcImmByte, em_jcxz),
-       D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
-       D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
+       I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
+       I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
        /* 0xE8 - 0xEF */
-       D(SrcImm | Stack), D(SrcImm | ImplicitOps),
+       I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
        I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
-       D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
-       D2bvIP(SrcAcc | DstDX, out, check_perm_out),
+       I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
+       I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
        /* 0xF0 - 0xF7 */
        N, DI(ImplicitOps, icebp), N, N,
        DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3242,15 +3414,15 @@ static struct opcode twobyte_table[256] = {
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
-       DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
-       DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
+       IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
+       IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
        N, N, N, N,
        N, N, N, N, N, N, N, N,
        /* 0x30 - 0x3F */
-       DI(ImplicitOps | Priv, wrmsr),
+       II(ImplicitOps | Priv, em_wrmsr, wrmsr),
        IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
-       DI(ImplicitOps | Priv, rdmsr),
-       DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
+       II(ImplicitOps | Priv, em_rdmsr, rdmsr),
+       IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
        I(ImplicitOps | VendorSpecific, em_sysenter),
        I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
        N, N,
@@ -3275,26 +3447,28 @@ static struct opcode twobyte_table[256] = {
        X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
        /* 0xA0 - 0xA7 */
        I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
-       DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
+       DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
        D(DstMem | SrcReg | Src2ImmByte | ModRM),
        D(DstMem | SrcReg | Src2CL | ModRM), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-       DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
+       DI(ImplicitOps, rsm),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        D(DstMem | SrcReg | Src2ImmByte | ModRM),
        D(DstMem | SrcReg | Src2CL | ModRM),
        D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
        /* 0xB0 - 0xB7 */
-       D2bv(DstMem | SrcReg | ModRM | Lock),
+       I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
        I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
-       D(DstMem | SrcReg | ModRM | BitOp | Lock),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
        I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
        D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xB8 - 0xBF */
        N, N,
-       G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
-       D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
+       G(BitOp, group8),
+       I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
+       I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
        D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xC0 - 0xCF */
        D2bv(DstMem | SrcReg | ModRM | Lock),
@@ -3320,6 +3494,7 @@ static struct opcode twobyte_table[256] = {
 #undef D2bv
 #undef D2bvIP
 #undef I2bv
+#undef I2bvIP
 #undef I6ALU
 
 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
@@ -3697,6 +3872,11 @@ done:
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
+{
+       return ctxt->d & PageTable;
+}
+
 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 {
        /* The second termination condition only applies for REPE
@@ -3720,7 +3900,6 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
        struct x86_emulate_ops *ops = ctxt->ops;
-       u64 msr_data;
        int rc = X86EMUL_CONTINUE;
        int saved_dst_type = ctxt->dst.type;
 
@@ -3854,15 +4033,6 @@ special_insn:
                        goto cannot_emulate;
                ctxt->dst.val = (s32) ctxt->src.val;
                break;
-       case 0x6c:              /* insb */
-       case 0x6d:              /* insw/insd */
-               ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
-               goto do_io_in;
-       case 0x6e:              /* outsb */
-       case 0x6f:              /* outsw/outsd */
-               ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
-               goto do_io_out;
-               break;
        case 0x70 ... 0x7f: /* jcc (short) */
                if (test_cc(ctxt->b, ctxt->eflags))
                        jmp_rel(ctxt, ctxt->src.val);
@@ -3870,9 +4040,6 @@ special_insn:
        case 0x8d: /* lea r16/r32, m */
                ctxt->dst.val = ctxt->src.addr.mem.ea;
                break;
-       case 0x8f:              /* pop (sole member of Grp1a) */
-               rc = em_grp1a(ctxt);
-               break;
        case 0x90 ... 0x97: /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
                        break;
@@ -3905,38 +4072,11 @@ special_insn:
                ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
                rc = em_grp2(ctxt);
                break;
-       case 0xe4:      /* inb */
-       case 0xe5:      /* in */
-               goto do_io_in;
-       case 0xe6: /* outb */
-       case 0xe7: /* out */
-               goto do_io_out;
-       case 0xe8: /* call (near) */ {
-               long int rel = ctxt->src.val;
-               ctxt->src.val = (unsigned long) ctxt->_eip;
-               jmp_rel(ctxt, rel);
-               rc = em_push(ctxt);
-               break;
-       }
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
                jmp_rel(ctxt, ctxt->src.val);
                ctxt->dst.type = OP_NONE; /* Disable writeback. */
                break;
-       case 0xec: /* in al,dx */
-       case 0xed: /* in (e/r)ax,dx */
-       do_io_in:
-               if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
-                                    &ctxt->dst.val))
-                       goto done; /* IO is needed */
-               break;
-       case 0xee: /* out dx,al */
-       case 0xef: /* out dx,(e/r)ax */
-       do_io_out:
-               ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
-                                     &ctxt->src.val, 1);
-               ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               break;
        case 0xf4:              /* hlt */
                ctxt->ops->halt(ctxt);
                break;
@@ -3956,12 +4096,6 @@ special_insn:
        case 0xfd: /* std */
                ctxt->eflags |= EFLG_DF;
                break;
-       case 0xfe: /* Grp4 */
-               rc = em_grp45(ctxt);
-               break;
-       case 0xff: /* Grp5 */
-               rc = em_grp45(ctxt);
-               break;
        default:
                goto cannot_emulate;
        }
@@ -4036,49 +4170,6 @@ twobyte_insn:
        case 0x21: /* mov from dr to reg */
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
-       case 0x22: /* mov reg, cr */
-               if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-               ctxt->dst.type = OP_NONE;
-               break;
-       case 0x23: /* mov from reg to dr */
-               if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
-                               ((ctxt->mode == X86EMUL_MODE_PROT64) ?
-                                ~0ULL : ~0U)) < 0) {
-                       /* #UD condition is already handled by the code above */
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-
-               ctxt->dst.type = OP_NONE;       /* no writeback */
-               break;
-       case 0x30:
-               /* wrmsr */
-               msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
-                       | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
-               if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               }
-               rc = X86EMUL_CONTINUE;
-               break;
-       case 0x32:
-               /* rdmsr */
-               if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
-                       emulate_gp(ctxt, 0);
-                       rc = X86EMUL_PROPAGATE_FAULT;
-                       goto done;
-               } else {
-                       ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
-                       ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
-               }
-               rc = X86EMUL_CONTINUE;
-               break;
        case 0x40 ... 0x4f:     /* cmov */
                ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
                if (!test_cc(ctxt->b, ctxt->eflags))
@@ -4091,93 +4182,21 @@ twobyte_insn:
        case 0x90 ... 0x9f:     /* setcc r/m8 */
                ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
                break;
-       case 0xa3:
-             bt:               /* bt */
-               ctxt->dst.type = OP_NONE;
-               /* only subword offset */
-               ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
-               emulate_2op_SrcV_nobyte(ctxt, "bt");
-               break;
        case 0xa4: /* shld imm8, r, r/m */
        case 0xa5: /* shld cl, r, r/m */
                emulate_2op_cl(ctxt, "shld");
                break;
-       case 0xab:
-             bts:              /* bts */
-               emulate_2op_SrcV_nobyte(ctxt, "bts");
-               break;
        case 0xac: /* shrd imm8, r, r/m */
        case 0xad: /* shrd cl, r, r/m */
                emulate_2op_cl(ctxt, "shrd");
                break;
        case 0xae:              /* clflush */
                break;
-       case 0xb0 ... 0xb1:     /* cmpxchg */
-               /*
-                * Save real source value, then compare EAX against
-                * destination.
-                */
-               ctxt->src.orig_val = ctxt->src.val;
-               ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
-               emulate_2op_SrcV(ctxt, "cmp");
-               if (ctxt->eflags & EFLG_ZF) {
-                       /* Success: write back to memory. */
-                       ctxt->dst.val = ctxt->src.orig_val;
-               } else {
-                       /* Failure: write the value we saw to EAX. */
-                       ctxt->dst.type = OP_REG;
-                       ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
-               }
-               break;
-       case 0xb3:
-             btr:              /* btr */
-               emulate_2op_SrcV_nobyte(ctxt, "btr");
-               break;
        case 0xb6 ... 0xb7:     /* movzx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
                                                       : (u16) ctxt->src.val;
                break;
-       case 0xba:              /* Grp8 */
-               switch (ctxt->modrm_reg & 3) {
-               case 0:
-                       goto bt;
-               case 1:
-                       goto bts;
-               case 2:
-                       goto btr;
-               case 3:
-                       goto btc;
-               }
-               break;
-       case 0xbb:
-             btc:              /* btc */
-               emulate_2op_SrcV_nobyte(ctxt, "btc");
-               break;
-       case 0xbc: {            /* bsf */
-               u8 zf;
-               __asm__ ("bsf %2, %0; setz %1"
-                        : "=r"(ctxt->dst.val), "=q"(zf)
-                        : "r"(ctxt->src.val));
-               ctxt->eflags &= ~X86_EFLAGS_ZF;
-               if (zf) {
-                       ctxt->eflags |= X86_EFLAGS_ZF;
-                       ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               }
-               break;
-       }
-       case 0xbd: {            /* bsr */
-               u8 zf;
-               __asm__ ("bsr %2, %0; setz %1"
-                        : "=r"(ctxt->dst.val), "=q"(zf)
-                        : "r"(ctxt->src.val));
-               ctxt->eflags &= ~X86_EFLAGS_ZF;
-               if (zf) {
-                       ctxt->eflags |= X86_EFLAGS_ZF;
-                       ctxt->dst.type = OP_NONE;       /* Disable writeback. */
-               }
-               break;
-       }
        case 0xbe ... 0xbf:     /* movsx */
                ctxt->dst.bytes = ctxt->op_bytes;
                ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
@@ -4194,9 +4213,6 @@ twobyte_insn:
                ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
                                                        (u64) ctxt->src.val;
                break;
-       case 0xc7:              /* Grp9 (cmpxchg8b) */
-               rc = em_grp9(ctxt);
-               break;
        default:
                goto cannot_emulate;
        }
index 405f2620392f5e32d393166520e707b2d039b3bb..d68f99df690c72ba81b53f8436c77c2d420d56a5 100644 (file)
@@ -344,7 +344,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
-       if (!irqchip_in_kernel(kvm))
+       if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
                return;
 
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -397,15 +397,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
        case 1:
         /* FIXME: enhance mode 4 precision */
        case 4:
-               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-                       create_pit_timer(kvm, val, 0);
-               }
+               create_pit_timer(kvm, val, 0);
                break;
        case 2:
        case 3:
-               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-                       create_pit_timer(kvm, val, 1);
-               }
+               create_pit_timer(kvm, val, 1);
                break;
        default:
                destroy_pit_timer(kvm->arch.vpit);
index cac4746d7ffb643b7fcd54451fb2d994f2f53c9b..b6a73537e1efd2270a00d52612342ed168081b55 100644 (file)
@@ -262,9 +262,10 @@ int kvm_pic_read_irq(struct kvm *kvm)
 
 void kvm_pic_reset(struct kvm_kpic_state *s)
 {
-       int irq;
-       struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
+       int irq, i;
+       struct kvm_vcpu *vcpu;
        u8 irr = s->irr, isr = s->imr;
+       bool found = false;
 
        s->last_irr = 0;
        s->irr = 0;
@@ -281,12 +282,19 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
        s->special_fully_nested_mode = 0;
        s->init4 = 0;
 
-       for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
-               if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
-                       if (irr & (1 << irq) || isr & (1 << irq)) {
-                               pic_clear_isr(s, irq);
-                       }
-       }
+       kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+               if (kvm_apic_accept_pic_intr(vcpu)) {
+                       found = true;
+                       break;
+               }
+
+
+       if (!found)
+               return;
+
+       for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+               if (irr & (1 << irq) || isr & (1 << irq))
+                       pic_clear_isr(s, irq);
 }
 
 static void pic_ioport_write(void *opaque, u32 addr, u32 val)
index 54abb40199d67d45ed9b1dc752ebc8cd83a6d509..cfdc6e0ef0025f4687c1a7b9d3c6dff42f0c8875 100644 (file)
@@ -38,6 +38,7 @@
 #include "irq.h"
 #include "trace.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1120,7 +1121,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
        u32 reg = apic_get_reg(apic, lvt_type);
        int vector, mode, trig_mode;
index 138e8cc6fea600ef42a515cf08d6d2c4a6f7a60b..6f4ce2575d095edce570b7d15c0cd9724b6255ec 100644 (file)
@@ -34,6 +34,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
index f1b36cf3e3d0aff4ee1d587deb044a9618d51869..2a2a9b40db199dd49c99af48d9e2c25bc4acd440 100644 (file)
@@ -59,15 +59,6 @@ enum {
        AUDIT_POST_SYNC
 };
 
-char *audit_point_name[] = {
-       "pre page fault",
-       "post page fault",
-       "pre pte write",
-       "post pte write",
-       "pre sync",
-       "post sync"
-};
-
 #undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
@@ -87,9 +78,6 @@ static int dbg = 0;
 module_param(dbg, bool, 0644);
 #endif
 
-static int oos_shadow = 1;
-module_param(oos_shadow, bool, 0644);
-
 #ifndef MMU_DEBUG
 #define ASSERT(x) do { } while (0)
 #else
@@ -593,6 +581,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
        return 0;
 }
 
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+       return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
 {
@@ -953,21 +946,35 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
        }
 }
 
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+                                   struct kvm_memory_slot *slot)
+{
+       struct kvm_lpage_info *linfo;
+
+       if (likely(level == PT_PAGE_TABLE_LEVEL))
+               return &slot->rmap[gfn - slot->base_gfn];
+
+       linfo = lpage_info_slot(gfn, slot, level);
+       return &linfo->rmap_pde;
+}
+
 /*
  * Take gfn and return the reverse mapping to it.
  */
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
        struct kvm_memory_slot *slot;
-       struct kvm_lpage_info *linfo;
 
        slot = gfn_to_memslot(kvm, gfn);
-       if (likely(level == PT_PAGE_TABLE_LEVEL))
-               return &slot->rmap[gfn - slot->base_gfn];
+       return __gfn_to_rmap(kvm, gfn, level, slot);
+}
 
-       linfo = lpage_info_slot(gfn, slot, level);
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_memory_cache *cache;
 
-       return &linfo->rmap_pde;
+       cache = &vcpu->arch.mmu_pte_list_desc_cache;
+       return mmu_memory_cache_free_objects(cache);
 }
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -1004,17 +1011,16 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }
 
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+                              struct kvm_memory_slot *slot)
 {
        unsigned long *rmapp;
        u64 *spte;
        int i, write_protected = 0;
 
-       rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
-
+       rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
-               BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writable_pte(*spte)) {
@@ -1027,12 +1033,11 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        /* check for huge page mappings */
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               rmapp = gfn_to_rmap(kvm, gfn, i);
+               rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
                spte = rmap_next(kvm, rmapp, NULL);
                while (spte) {
-                       BUG_ON(!spte);
                        BUG_ON(!(*spte & PT_PRESENT_MASK));
-                       BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+                       BUG_ON(!is_large_pte(*spte));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
                                drop_spte(kvm, spte);
@@ -1047,6 +1052,14 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        return write_protected;
 }
 
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+}
+
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long data)
 {
@@ -1103,15 +1116,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long data))
 {
-       int i, j;
+       int j;
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
+       kvm_for_each_memslot(memslot, slots) {
                unsigned long start = memslot->userspace_addr;
                unsigned long end;
 
@@ -1324,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                                  PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-       bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
        sp->parent_ptes = 0;
        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
@@ -1511,6 +1524,13 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
        return ret;
 }
 
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+static void mmu_audit_disable(void) { }
+#endif
+
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
 {
@@ -1640,6 +1660,18 @@ static void init_shadow_page_table(struct kvm_mmu_page *sp)
                sp->spt[i] = 0ull;
 }
 
+static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
+{
+       sp->write_flooding_count = 0;
+}
+
+static void clear_sp_write_flooding_count(u64 *spte)
+{
+       struct kvm_mmu_page *sp =  page_header(__pa(spte));
+
+       __clear_sp_write_flooding_count(sp);
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
@@ -1683,6 +1715,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                } else if (sp->unsync)
                        kvm_mmu_mark_parents_unsync(sp);
 
+               __clear_sp_write_flooding_count(sp);
                trace_kvm_mmu_get_page(sp, false);
                return sp;
        }
@@ -1796,7 +1829,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        }
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                             u64 *spte)
 {
        u64 pte;
@@ -1804,17 +1837,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
-               if (is_last_spte(pte, sp->role.level))
+               if (is_last_spte(pte, sp->role.level)) {
                        drop_spte(kvm, spte);
-               else {
+                       if (is_large_pte(pte))
+                               --kvm->stat.lpages;
+               } else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, spte);
                }
-       } else if (is_mmio_spte(pte))
+               return true;
+       }
+
+       if (is_mmio_spte(pte))
                mmu_spte_clear_no_track(spte);
 
-       if (is_large_pte(pte))
-               --kvm->stat.lpages;
+       return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
@@ -1831,15 +1868,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
        mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-       int i;
-       struct kvm_vcpu *vcpu;
-
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        u64 *parent_pte;
@@ -1899,7 +1927,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        }
 
        sp->role.invalid = 1;
-       kvm_mmu_reset_last_pte_updated(kvm);
        return ret;
 }
 
@@ -1985,7 +2012,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
        kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
@@ -1994,7 +2021,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
        pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
        r = 0;
-
+       spin_lock(&kvm->mmu_lock);
        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
                pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                         sp->role.word);
@@ -2002,22 +2029,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
        }
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       return r;
-}
-
-static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
-{
-       struct kvm_mmu_page *sp;
-       struct hlist_node *node;
-       LIST_HEAD(invalid_list);
+       spin_unlock(&kvm->mmu_lock);
 
-       for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
-               pgprintk("%s: zap %llx %x\n",
-                        __func__, gfn, sp->role.word);
-               kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-       }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
@@ -2169,8 +2185,6 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                        return 1;
 
                if (!need_unsync && !s->unsync) {
-                       if (!oos_shadow)
-                               return 1;
                        need_unsync = true;
                }
        }
@@ -2191,11 +2205,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (set_mmio_spte(sptep, gfn, pfn, pte_access))
                return 0;
 
-       /*
-        * We don't set the accessed bit, since we sometimes want to see
-        * whether the guest actually used the pte (in order to detect
-        * demand paging).
-        */
        spte = PT_PRESENT_MASK;
        if (!speculative)
                spte |= shadow_accessed_mask;
@@ -2346,10 +2355,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                }
        }
        kvm_release_pfn_clean(pfn);
-       if (speculative) {
-               vcpu->arch.last_pte_updated = sptep;
-               vcpu->arch.last_pte_gfn = gfn;
-       }
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2840,12 +2845,12 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
                return;
 
        vcpu_clear_mmio_info(vcpu, ~0ul);
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+       kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
                sp = page_header(root);
                mmu_sync_children(vcpu, sp);
-               trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+               kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                return;
        }
        for (i = 0; i < 4; ++i) {
@@ -2857,7 +2862,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
                        mmu_sync_children(vcpu, sp);
                }
        }
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+       kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 }
 
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -3510,28 +3515,119 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
                kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+                                   const u8 *new, int *bytes)
 {
-       u64 *spte = vcpu->arch.last_pte_updated;
+       u64 gentry;
+       int r;
+
+       /*
+        * Assume that the pte write is on a page table of the same type
+        * as the current vcpu paging mode, since we update the sptes only
+        * when they have the same mode.
+        */
+       if (is_pae(vcpu) && *bytes == 4) {
+               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+               *gpa &= ~(gpa_t)7;
+               *bytes = 8;
+               r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
+               if (r)
+                       gentry = 0;
+               new = (const u8 *)&gentry;
+       }
 
-       return !!(spte && (*spte & shadow_accessed_mask));
+       switch (*bytes) {
+       case 4:
+               gentry = *(const u32 *)new;
+               break;
+       case 8:
+               gentry = *(const u64 *)new;
+               break;
+       default:
+               gentry = 0;
+               break;
+       }
+
+       return gentry;
 }
 
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+/*
+ * If we're seeing too many writes to a page, it may no longer be a page table,
+ * or we may be forking, in which case it is better to unmap the page.
+ */
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-       u64 *spte = vcpu->arch.last_pte_updated;
+       /*
+        * Skip write-flooding detection for the sp whose level is 1, because
+        * it can become unsync, and then the guest page is not write-protected.
+        */
+       if (sp->role.level == 1)
+               return false;
 
-       if (spte
-           && vcpu->arch.last_pte_gfn == gfn
-           && shadow_accessed_mask
-           && !(*spte & shadow_accessed_mask)
-           && is_shadow_present_pte(*spte))
-               set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+       return ++sp->write_flooding_count >= 3;
+}
+
+/*
+ * Misaligned accesses are too much trouble to fix up; also, they usually
+ * indicate a page is not used as a page table.
+ */
+static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
+                                   int bytes)
+{
+       unsigned offset, pte_size, misaligned;
+
+       pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+                gpa, bytes, sp->role.word);
+
+       offset = offset_in_page(gpa);
+       pte_size = sp->role.cr4_pae ? 8 : 4;
+
+       /*
+        * Sometimes, the OS only writes the last byte to update status
+        * bits; for example, Linux uses the andb instruction in clear_bit().
+        */
+       if (!(offset & (pte_size - 1)) && bytes == 1)
+               return false;
+
+       misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+       misaligned |= bytes < 4;
+
+       return misaligned;
+}
+
+static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
+{
+       unsigned page_offset, quadrant;
+       u64 *spte;
+       int level;
+
+       page_offset = offset_in_page(gpa);
+       level = sp->role.level;
+       *nspte = 1;
+       if (!sp->role.cr4_pae) {
+               page_offset <<= 1;      /* 32->64 */
+               /*
+                * A 32-bit pde maps 4MB while the shadow pdes map
+                * only 2MB.  So we need to double the offset again
+                * and zap two pdes instead of one.
+                */
+               if (level == PT32_ROOT_LEVEL) {
+                       page_offset &= ~7; /* kill rounding error */
+                       page_offset <<= 1;
+                       *nspte = 2;
+               }
+               quadrant = page_offset >> PAGE_SHIFT;
+               page_offset &= ~PAGE_MASK;
+               if (quadrant != sp->role.quadrant)
+                       return NULL;
+       }
+
+       spte = &sp->spt[page_offset / sizeof(*spte)];
+       return spte;
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes,
-                      bool guest_initiated)
+                      const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
@@ -3539,8 +3635,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
-       unsigned pte_size, page_offset, misaligned, quadrant, offset;
-       int level, npte, invlpg_counter, r, flooded = 0;
+       int npte;
        bool remote_flush, local_flush, zap_page;
 
        /*
@@ -3551,112 +3646,45 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                return;
 
        zap_page = remote_flush = local_flush = false;
-       offset = offset_in_page(gpa);
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
-       invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
 
        /*
-        * Assume that the pte write on a page table of the same type
-        * as the current vcpu paging mode since we update the sptes only
-        * when they have the same mode.
+        * No need to care whether allocation memory is successful
+        * or not, since pte prefetch is skipped if it does not have
+        * enough objects in the cache.
         */
-       if ((is_pae(vcpu) && bytes == 4) || !new) {
-               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-               if (is_pae(vcpu)) {
-                       gpa &= ~(gpa_t)7;
-                       bytes = 8;
-               }
-               r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
-               if (r)
-                       gentry = 0;
-               new = (const u8 *)&gentry;
-       }
-
-       switch (bytes) {
-       case 4:
-               gentry = *(const u32 *)new;
-               break;
-       case 8:
-               gentry = *(const u64 *)new;
-               break;
-       default:
-               gentry = 0;
-               break;
-       }
+       mmu_topup_memory_caches(vcpu);
 
        spin_lock(&vcpu->kvm->mmu_lock);
-       if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
-               gentry = 0;
-       kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
-       if (guest_initiated) {
-               kvm_mmu_access_page(vcpu, gfn);
-               if (gfn == vcpu->arch.last_pt_write_gfn
-                   && !last_updated_pte_accessed(vcpu)) {
-                       ++vcpu->arch.last_pt_write_count;
-                       if (vcpu->arch.last_pt_write_count >= 3)
-                               flooded = 1;
-               } else {
-                       vcpu->arch.last_pt_write_gfn = gfn;
-                       vcpu->arch.last_pt_write_count = 1;
-                       vcpu->arch.last_pte_updated = NULL;
-               }
-       }
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-               pte_size = sp->role.cr4_pae ? 8 : 4;
-               misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
-               misaligned |= bytes < 4;
-               if (misaligned || flooded) {
-                       /*
-                        * Misaligned accesses are too much trouble to fix
-                        * up; also, they usually indicate a page is not used
-                        * as a page table.
-                        *
-                        * If we're seeing too many writes to a page,
-                        * it may no longer be a page table, or we may be
-                        * forking, in which case it is better to unmap the
-                        * page.
-                        */
-                       pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-                                gpa, bytes, sp->role.word);
+               spte = get_written_sptes(sp, gpa, &npte);
+
+               if (detect_write_misaligned(sp, gpa, bytes) ||
+                     detect_write_flooding(sp, spte)) {
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
-               page_offset = offset;
-               level = sp->role.level;
-               npte = 1;
-               if (!sp->role.cr4_pae) {
-                       page_offset <<= 1;      /* 32->64 */
-                       /*
-                        * A 32-bit pde maps 4MB while the shadow pdes map
-                        * only 2MB.  So we need to double the offset again
-                        * and zap two pdes instead of one.
-                        */
-                       if (level == PT32_ROOT_LEVEL) {
-                               page_offset &= ~7; /* kill rounding error */
-                               page_offset <<= 1;
-                               npte = 2;
-                       }
-                       quadrant = page_offset >> PAGE_SHIFT;
-                       page_offset &= ~PAGE_MASK;
-                       if (quadrant != sp->role.quadrant)
-                               continue;
-               }
+
+               spte = get_written_sptes(sp, gpa, &npte);
+               if (!spte)
+                       continue;
+
                local_flush = true;
-               spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-                             & mask.word))
+                             & mask.word) && rmap_can_add(vcpu))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
@@ -3665,7 +3693,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
+       kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
@@ -3679,9 +3707,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-       spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       spin_unlock(&vcpu->kvm->mmu_lock);
+
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3702,10 +3729,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+       if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+               return vcpu_match_mmio_gpa(vcpu, addr);
+
+       return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                       void *insn, int insn_len)
 {
-       int r;
+       int r, emulation_type = EMULTYPE_RETRY;
        enum emulation_result er;
 
        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3717,11 +3752,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                goto out;
        }
 
-       r = mmu_topup_memory_caches(vcpu);
-       if (r)
-               goto out;
+       if (is_mmio_page_fault(vcpu, cr2))
+               emulation_type = 0;
 
-       er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+       er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
        switch (er) {
        case EMULATE_DONE:
@@ -3792,7 +3826,11 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.translate_gpa = translate_gpa;
+       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
 
        return alloc_mmu_pages(vcpu);
 }
@@ -3852,14 +3890,14 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
-static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-                                              struct list_head *invalid_list)
+static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
+                                               struct list_head *invalid_list)
 {
        struct kvm_mmu_page *page;
 
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
-       return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
+       kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
 }
 
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
@@ -3874,15 +3912,15 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
        raw_spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               int idx, freed_pages;
+               int idx;
                LIST_HEAD(invalid_list);
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
-                       freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-                                                         &invalid_list);
+                       kvm_mmu_remove_some_alloc_mmu_pages(kvm,
+                                                           &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;
@@ -3944,15 +3982,15 @@ nomem:
  */
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-       int i;
        unsigned int nr_mmu_pages;
        unsigned int  nr_pages = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++)
-               nr_pages += slots->memslots[i].npages;
+       kvm_for_each_memslot(memslot, slots)
+               nr_pages += memslot->npages;
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
@@ -3961,127 +3999,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        return nr_mmu_pages;
 }
 
-static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-                               unsigned len)
-{
-       if (len > buffer->len)
-               return NULL;
-       return buffer->ptr;
-}
-
-static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-                               unsigned len)
-{
-       void *ret;
-
-       ret = pv_mmu_peek_buffer(buffer, len);
-       if (!ret)
-               return ret;
-       buffer->ptr += len;
-       buffer->len -= len;
-       buffer->processed += len;
-       return ret;
-}
-
-static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
-                            gpa_t addr, gpa_t value)
-{
-       int bytes = 8;
-       int r;
-
-       if (!is_long_mode(vcpu) && !is_pae(vcpu))
-               bytes = 4;
-
-       r = mmu_topup_memory_caches(vcpu);
-       if (r)
-               return r;
-
-       if (!emulator_write_phys(vcpu, addr, &value, bytes))
-               return -EFAULT;
-
-       return 1;
-}
-
-static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-       (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
-       return 1;
-}
-
-static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-       return 1;
-}
-
-static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
-                            struct kvm_pv_mmu_op_buffer *buffer)
-{
-       struct kvm_mmu_op_header *header;
-
-       header = pv_mmu_peek_buffer(buffer, sizeof *header);
-       if (!header)
-               return 0;
-       switch (header->op) {
-       case KVM_MMU_OP_WRITE_PTE: {
-               struct kvm_mmu_op_write_pte *wpte;
-
-               wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
-               if (!wpte)
-                       return 0;
-               return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
-                                       wpte->pte_val);
-       }
-       case KVM_MMU_OP_FLUSH_TLB: {
-               struct kvm_mmu_op_flush_tlb *ftlb;
-
-               ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
-               if (!ftlb)
-                       return 0;
-               return kvm_pv_mmu_flush_tlb(vcpu);
-       }
-       case KVM_MMU_OP_RELEASE_PT: {
-               struct kvm_mmu_op_release_pt *rpt;
-
-               rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
-               if (!rpt)
-                       return 0;
-               return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
-       }
-       default: return 0;
-       }
-}
-
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-                 gpa_t addr, unsigned long *ret)
-{
-       int r;
-       struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
-
-       buffer->ptr = buffer->buf;
-       buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
-       buffer->processed = 0;
-
-       r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
-       if (r)
-               goto out;
-
-       while (buffer->len) {
-               r = kvm_pv_mmu_op_one(vcpu, buffer);
-               if (r < 0)
-                       goto out;
-               if (r == 0)
-                       break;
-       }
-
-       r = 1;
-out:
-       *ret = buffer->processed;
-       return r;
-}
-
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 {
        struct kvm_shadow_walk_iterator iterator;
@@ -4110,12 +4027,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_module_exit(void)
 {
        mmu_destroy_caches();
index 746ec259d02490bf7b7ce552560ff37e170d9825..fe15dcc07a6b9f9497bffc4ea2d6c4d19019c6fd 100644 (file)
 
 #include <linux/ratelimit.h>
 
+char const *audit_point_name[] = {
+       "pre page fault",
+       "post page fault",
+       "pre pte write",
+       "post pte write",
+       "pre sync",
+       "post sync"
+};
+
 #define audit_printk(kvm, fmt, args...)                \
        printk(KERN_ERR "audit: (%s) error: "   \
                fmt, audit_point_name[kvm->arch.audit_point], ##args)
@@ -224,7 +233,10 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
        mmu_spte_walk(vcpu, audit_spte);
 }
 
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
+static bool mmu_audit;
+static struct jump_label_key mmu_audit_key;
+
+static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 
@@ -236,18 +248,18 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
        audit_vcpu_spte(vcpu);
 }
 
-static bool mmu_audit;
+static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
+{
+       if (static_branch((&mmu_audit_key)))
+               __kvm_mmu_audit(vcpu, point);
+}
 
 static void mmu_audit_enable(void)
 {
-       int ret;
-
        if (mmu_audit)
                return;
 
-       ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       WARN_ON(ret);
-
+       jump_label_inc(&mmu_audit_key);
        mmu_audit = true;
 }
 
@@ -256,8 +268,7 @@ static void mmu_audit_disable(void)
        if (!mmu_audit)
                return;
 
-       unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       tracepoint_synchronize_unregister();
+       jump_label_dec(&mmu_audit_key);
        mmu_audit = false;
 }
 
index eed67f34146d66557d5f5668a1f7c37191dd50a2..89fb0e81322a96d996d1fd554ec9c1d6c55553f6 100644 (file)
@@ -243,25 +243,6 @@ TRACE_EVENT(
        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
 );
-
-TRACE_EVENT(
-       kvm_mmu_audit,
-       TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
-       TP_ARGS(vcpu, audit_point),
-
-       TP_STRUCT__entry(
-               __field(struct kvm_vcpu *, vcpu)
-               __field(int, audit_point)
-       ),
-
-       TP_fast_assign(
-               __entry->vcpu = vcpu;
-               __entry->audit_point = audit_point;
-       ),
-
-       TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
-                 audit_point_name[__entry->audit_point])
-);
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
index 92994100638b26749519bc69d9c59b5a4ba10cc7..15610285ebb647075e17a1388e031128eb764329 100644 (file)
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
             shadow_walk_next(&it)) {
                gfn_t table_gfn;
 
+               clear_sp_write_flooding_count(it.sptep);
                drop_large_spte(vcpu, it.sptep);
 
                sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
             shadow_walk_next(&it)) {
                gfn_t direct_gfn;
 
+               clear_sp_write_flooding_count(it.sptep);
                validate_direct_spte(vcpu, it.sptep, direct_access);
 
                drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                link_shadow_page(it.sptep, sp);
        }
 
+       clear_sp_write_flooding_count(it.sptep);
        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
                     user_fault, write_fault, emulate, it.level,
                     gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
-               if (!prefault) {
+               if (!prefault)
                        inject_page_fault(vcpu, &walker.fault);
-                       /* reset fork detector */
-                       vcpu->arch.last_pt_write_count = 0;
-               }
+
                return 0;
        }
 
@@ -631,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
 
-       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -641,11 +642,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
                 sptep, *sptep, emulate);
 
-       if (!emulate)
-               vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
        ++vcpu->stat.pf_fixed;
-       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
+       kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return emulate;
@@ -656,65 +654,66 @@ out_unlock:
        return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+       int offset = 0;
+
+       WARN_ON(sp->role.level != 1);
+
+       if (PTTYPE == 32)
+               offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+       return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
-       gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
-       int need_flush = 0;
 
        vcpu_clear_mmio_info(vcpu, gva);
 
-       spin_lock(&vcpu->kvm->mmu_lock);
+       /*
+        * No need to check return value here, rmap_can_add() can
+        * help us to skip pte prefetch later.
+        */
+       mmu_topup_memory_caches(vcpu);
 
+       spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
 
                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
-                       int offset, shift;
+                       pt_element_t gpte;
+                       gpa_t pte_gpa;
 
                        if (!sp->unsync)
                                break;
 
-                       shift = PAGE_SHIFT -
-                                 (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-                       offset = sp->role.quadrant << shift;
-
-                       pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+                       pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-                       if (is_shadow_present_pte(*sptep)) {
-                               if (is_large_pte(*sptep))
-                                       --vcpu->kvm->stat.lpages;
-                               drop_spte(vcpu->kvm, sptep);
-                               need_flush = 1;
-                       } else if (is_mmio_spte(*sptep))
-                               mmu_spte_clear_no_track(sptep);
+                       if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+                               kvm_flush_remote_tlbs(vcpu->kvm);
 
-                       break;
+                       if (!rmap_can_add(vcpu))
+                               break;
+
+                       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+                                                 sizeof(pt_element_t)))
+                               break;
+
+                       FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }
 
                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
-
-       if (need_flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
-
-       atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
        spin_unlock(&vcpu->kvm->mmu_lock);
-
-       if (pte_gpa == -1)
-               return;
-
-       if (mmu_topup_memory_caches(vcpu))
-               return;
-       kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-       int i, offset, nr_present;
+       int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;
 
-       offset = nr_present = 0;
-
        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);
 
-       if (PTTYPE == 32)
-               offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-       first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+       first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644 (file)
index 0000000..7aad544
--- /dev/null
@@ -0,0 +1,533 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_arch_event_perf_mapping {
+       u8 eventsel;
+       u8 unit_mask;
+       unsigned event_type;
+       bool inexact;
+} arch_events[] = {
+       /* Index must match CPUID 0x0A.EBX bit vector */
+       [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+       [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+       [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
+       [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+       [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+       [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+};
+
+/* mapping between fixed pmc index and arch_events array */
+int fixed_pmc_events[] = {1, 0, 2};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+       return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+       return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+                                        u32 base)
+{
+       if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+               return &pmu->gp_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+       int base = MSR_CORE_PERF_FIXED_CTR0;
+       if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+               return &pmu->fixed_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+       return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+       if (idx < X86_PMC_IDX_FIXED)
+               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+       else
+               return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+}
+
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.apic)
+               kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+       struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+                       irq_work);
+       struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+                       arch.pmu);
+
+       kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+                             struct perf_sample_data *data,
+                             struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+               struct perf_sample_data *data, struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+               kvm_perf_overflow(perf_event, data, regs);
+               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+               /*
+                * Inject PMI. If vcpu was in a guest mode during NMI PMI
+                * can be ejected on a guest mode re-entry. Otherwise we can't
+                * be sure that vcpu wasn't executing hlt instruction at the
+                * time of vmexit and is not going to re-enter guest mode until
+                * woken up. So we should wake it, but this is impossible from
+                * NMI context. Do it from irq work instead.
+                */
+               if (!kvm_is_in_guest())
+                       irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+               else
+                       kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+       }
+}
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+       u64 counter, enabled, running;
+
+       counter = pmc->counter;
+
+       if (pmc->perf_event)
+               counter += perf_event_read_value(pmc->perf_event,
+                                                &enabled, &running);
+
+       /* FIXME: Scaling needed? */
+
+       return counter & pmc_bitmask(pmc);
+}
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = read_pmc(pmc);
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+       }
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+               unsigned config, bool exclude_user, bool exclude_kernel,
+               bool intr)
+{
+       struct perf_event *event;
+       struct perf_event_attr attr = {
+               .type = type,
+               .size = sizeof(attr),
+               .pinned = true,
+               .exclude_idle = true,
+               .exclude_host = 1,
+               .exclude_user = exclude_user,
+               .exclude_kernel = exclude_kernel,
+               .config = config,
+       };
+
+       attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+       event = perf_event_create_kernel_counter(&attr, -1, current,
+                                                intr ? kvm_perf_overflow_intr :
+                                                kvm_perf_overflow, pmc);
+       if (IS_ERR(event)) {
+               printk_once("kvm: pmu event creation failed %ld\n",
+                               PTR_ERR(event));
+               return;
+       }
+
+       pmc->perf_event = event;
+       clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+               u8 unit_mask)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(arch_events); i++)
+               if (arch_events[i].eventsel == event_select
+                               && arch_events[i].unit_mask == unit_mask
+                               && (pmu->available_event_types & (1 << i)))
+                       break;
+
+       if (i == ARRAY_SIZE(arch_events))
+               return PERF_COUNT_HW_MAX;
+
+       return arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+       unsigned config, type = PERF_TYPE_RAW;
+       u8 event_select, unit_mask;
+
+       pmc->eventsel = eventsel;
+
+       stop_counter(pmc);
+
+       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+               return;
+
+       event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+       if (!(event_select & (ARCH_PERFMON_EVENTSEL_EDGE |
+                               ARCH_PERFMON_EVENTSEL_INV |
+                               ARCH_PERFMON_EVENTSEL_CMASK))) {
+               config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+                               unit_mask);
+               if (config != PERF_COUNT_HW_MAX)
+                       type = PERF_TYPE_HARDWARE;
+       }
+
+       if (type == PERF_TYPE_RAW)
+               config = eventsel & X86_RAW_EVENT_MASK;
+
+       reprogram_counter(pmc, type, config,
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                       eventsel & ARCH_PERFMON_EVENTSEL_INT);
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+       unsigned en = en_pmi & 0x3;
+       bool pmi = en_pmi & 0x8;
+
+       stop_counter(pmc);
+
+       if (!en || !pmc_enabled(pmc))
+               return;
+
+       reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+                       arch_events[fixed_pmc_events[idx]].event_type,
+                       !(en & 0x2), /* exclude user */
+                       !(en & 0x1), /* exclude kernel */
+                       pmi);
+}
+
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+       return (ctrl >> (idx * 4)) & 0xf;
+}
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+               u8 en_pmi = fixed_en_pmi(data, i);
+               struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+               if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                       continue;
+
+               reprogram_fixed_counter(pmc, en_pmi, i);
+       }
+
+       pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+       struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+       if (!pmc)
+               return;
+
+       if (pmc_is_gp(pmc))
+               reprogram_gp_counter(pmc, pmc->eventsel);
+       else {
+               int fidx = idx - X86_PMC_IDX_FIXED;
+               reprogram_fixed_counter(pmc,
+                               fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+       }
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+       int bit;
+       u64 diff = pmu->global_ctrl ^ data;
+
+       pmu->global_ctrl = data;
+
+       for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+               reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int ret;
+
+       switch (msr) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               ret = pmu->version > 1;
+               break;
+       default:
+               ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+                       || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+                       || get_fixed_pmc(pmu, msr);
+               break;
+       }
+       return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               *data = pmu->fixed_ctr_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               *data = pmu->global_status;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               *data = pmu->global_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               *data = pmu->global_ovf_ctrl;
+               return 0;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
+                       *data = read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       *data = pmc->eventsel;
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               if (pmu->fixed_ctr_ctrl == data)
+                       return 0;
+               if (!(data & 0xfffffffffffff444)) {
+                       reprogram_fixed_counters(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               break; /* RO MSR */
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (pmu->global_ctrl == data)
+                       return 0;
+               if (!(data & pmu->global_ctrl_mask)) {
+                       global_ctrl_changed(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+                       pmu->global_status &= ~data;
+                       pmu->global_ovf_ctrl = data;
+                       return 0;
+               }
+               break;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
+                       data = (s64)(s32)data;
+                       pmc->counter += data - read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       if (data == pmc->eventsel)
+                               return 0;
+                       if (!(data & 0xffffffff00200000ull)) {
+                               reprogram_gp_counter(pmc, data);
+                               return 0;
+                       }
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       bool fast_mode = pmc & (1u << 31);
+       bool fixed = pmc & (1u << 30);
+       struct kvm_pmc *counters;
+       u64 ctr;
+
+       pmc &= (3u << 30) - 1;
+       if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+               return 1;
+       if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+               return 1;
+       counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+       ctr = read_pmc(&counters[pmc]);
+       if (fast_mode)
+               ctr = (u32)ctr;
+       *data = ctr;
+
+       return 0;
+}
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_cpuid_entry2 *entry;
+       unsigned bitmap_len;
+
+       pmu->nr_arch_gp_counters = 0;
+       pmu->nr_arch_fixed_counters = 0;
+       pmu->counter_bitmask[KVM_PMC_GP] = 0;
+       pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+       pmu->version = 0;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return;
+
+       pmu->version = entry->eax & 0xff;
+       if (!pmu->version)
+               return;
+
+       pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
+                       X86_PMC_MAX_GENERIC);
+       pmu->counter_bitmask[KVM_PMC_GP] =
+               ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
+       bitmap_len = (entry->eax >> 24) & 0xff;
+       pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
+
+       if (pmu->version == 1) {
+               pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
+               return;
+       }
+
+       pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+                       X86_PMC_MAX_FIXED);
+       pmu->counter_bitmask[KVM_PMC_FIXED] =
+               ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
+       pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
+                       | (((1ull << pmu->nr_arch_fixed_counters) - 1)
+                               << X86_PMC_IDX_FIXED));
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+       memset(pmu, 0, sizeof(*pmu));
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               pmu->gp_counters[i].type = KVM_PMC_GP;
+               pmu->gp_counters[i].vcpu = vcpu;
+               pmu->gp_counters[i].idx = i;
+       }
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+               pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+               pmu->fixed_counters[i].vcpu = vcpu;
+               pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+       }
+       init_irq_work(&pmu->irq_work, trigger_pmi);
+       kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int i;
+
+       irq_work_sync(&pmu->irq_work);
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               struct kvm_pmc *pmc = &pmu->gp_counters[i];
+               stop_counter(pmc);
+               pmc->counter = pmc->eventsel = 0;
+       }
+
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+               stop_counter(&pmu->fixed_counters[i]);
+
+       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+               pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_pmu_reset(vcpu);
+}
+
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       u64 bitmask;
+       int bit;
+
+       bitmask = pmu->reprogram_pmi;
+
+       for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+               struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+               if (unlikely(!pmc || !pmc->perf_event)) {
+                       clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+                       continue;
+               }
+
+               reprogram_idx(pmu, bit);
+       }
+}
index e32243eac2f48874ac494f5f8a27ed7fb605ebc4..5fa553babe566876d70115d1d259bd2d1a4925a6 100644 (file)
@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_NMI);
        set_intercept(svm, INTERCEPT_SMI);
        set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+       set_intercept(svm, INTERCEPT_RDPMC);
        set_intercept(svm, INTERCEPT_CPUID);
        set_intercept(svm, INTERCEPT_INVD);
        set_intercept(svm, INTERCEPT_HLT);
@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rdpmc_interception(struct vcpu_svm *svm)
+{
+       int err;
+
+       if (!static_cpu_has(X86_FEATURE_NRIPS))
+               return emulate_on_interception(svm);
+
+       err = kvm_rdpmc(&svm->vcpu);
+       kvm_complete_insn_gp(&svm->vcpu, err);
+
+       return 1;
+}
+
 bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
 {
        unsigned long cr0 = svm->vcpu.arch.cr0;
@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
+       [SVM_EXIT_RDPMC]                        = rdpmc_interception,
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_IRET]                         = iret_interception,
        [SVM_EXIT_INVD]                         = emulate_on_interception,
index ae432ea1cd835731f26451dd22f9ff53b7b85eb6..6b85cc647f346fed962e4bb5b870fdefc2cbd8cf 100644 (file)
 #include <linux/atomic.h>
 #include "kvm_timer.h"
 
-static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
+enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
 {
-       int restart_timer = 0;
+       struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+       struct kvm_vcpu *vcpu = ktimer->vcpu;
        wait_queue_head_t *q = &vcpu->wq;
 
        /*
@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
 
        if (ktimer->t_ops->is_periodic(ktimer)) {
                hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
-               restart_timer = 1;
-       }
-
-       return restart_timer;
-}
-
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
-{
-       int restart_timer;
-       struct kvm_vcpu *vcpu;
-       struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-
-       vcpu = ktimer->vcpu;
-       if (!vcpu)
-               return HRTIMER_NORESTART;
-
-       restart_timer = __kvm_timer_fn(vcpu, ktimer);
-       if (restart_timer)
                return HRTIMER_RESTART;
-       else
+       else
                return HRTIMER_NORESTART;
 }
-
index 579a0b51696ac560b1768ae445bb8f2084172f8c..906a7e84200f7b96eedbe5647d613a9a5d375546 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "irq.h"
 #include "mmu.h"
+#include "cpuid.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
-       vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
                CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
                CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+               CPU_BASED_RDPMC_EXITING |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        /*
         * We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                        return 1;
                /* Otherwise falls through */
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
                        return 0;
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
-                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
        switch (msr_index) {
        case MSR_EFER:
-               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                break;
 #ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
-                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_TSC_OFFSETING |
              CPU_BASED_MWAIT_EXITING |
              CPU_BASED_MONITOR_EXITING |
-             CPU_BASED_INVLPG_EXITING;
+             CPU_BASED_INVLPG_EXITING |
+             CPU_BASED_RDPMC_EXITING;
 
        if (yield_on_hlt)
                min |= CPU_BASED_HLT_EXITING;
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
                struct kvm_memslots *slots;
+               struct kvm_memory_slot *slot;
                gfn_t base_gfn;
 
                slots = kvm_memslots(kvm);
-               base_gfn = slots->memslots[0].base_gfn +
-                                kvm->memslots->memslots[0].npages - 3;
+               slot = id_to_memslot(slots, 0);
+               base_gfn = slot->base_gfn + slot->npages - 3;
+
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               /* We can get here when nested_run_pending caused
-                * vmx_interrupt_allowed() to return false. In this case, do
-                * nothing - the interrupt will be injected later.
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+               /*
+                * We get here if vmx_interrupt_allowed() said we can't
+                * inject to L1 now because L2 must run. Ask L2 to exit
+                * right after entry, so we can inject to L1 more promptly.
                 */
+               kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                return;
+       }
 
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
-               struct vmcs12 *vmcs12;
-               if (to_vmx(vcpu)->nested.nested_run_pending)
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (to_vmx(vcpu)->nested.nested_run_pending ||
+                   (vmcs12->idt_vectoring_info_field &
+                    VECTORING_INFO_VALID_MASK))
                        return 0;
                nested_vmx_vmexit(vcpu);
-               vmcs12 = get_vmcs12(vcpu);
                vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
                vmcs12->vm_exit_intr_info = 0;
                /* fall through to normal code, but now in L1, not L2 */
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+       int err;
+
+       err = kvm_rdpmc(vcpu);
+       kvm_complete_insn_gp(vcpu, err);
+
+       return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_INVD]                    = handle_invd,
        [EXIT_REASON_INVLPG]                  = handle_invlpg,
+       [EXIT_REASON_RDPMC]                   = handle_rdpmc,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
index 4c938da2ba00b40dc430d5a7854b0ee62b6ce631..1171def5f96b76a1119709e706b977be872f8082 100644 (file)
@@ -26,6 +26,7 @@
 #include "tss.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -82,8 +83,6 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                   struct kvm_cpuid_entry2 __user *entries);
 static void process_nmi(struct kvm_vcpu *vcpu);
 
 struct kvm_x86_ops *kvm_x86_ops;
@@ -574,54 +573,6 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
-
-static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static void update_cpuid(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-       struct kvm_lapic *apic = vcpu->arch.apic;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       if (!best)
-               return;
-
-       /* Update OSXSAVE bit */
-       if (cpu_has_xsave && best->function == 0x1) {
-               best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
-                       best->ecx |= bit(X86_FEATURE_OSXSAVE);
-       }
-
-       if (apic) {
-               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
-                       apic->lapic_timer.timer_mode_mask = 3 << 17;
-               else
-                       apic->lapic_timer.timer_mode_mask = 1 << 17;
-       }
-}
-
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -655,7 +606,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                kvm_mmu_reset_context(vcpu);
 
        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
-               update_cpuid(vcpu);
+               kvm_update_cpuid(vcpu);
 
        return 0;
 }
@@ -809,6 +760,21 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
 
+bool kvm_rdpmc(struct kvm_vcpu *vcpu)
+{
+       u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       u64 data;
+       int err;
+
+       err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+       if (err)
+               return err;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+       return err;
+}
+EXPORT_SYMBOL_GPL(kvm_rdpmc);
+
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1358,12 +1324,11 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
        if (page_num >= blob_size)
                goto out;
        r = -ENOMEM;
-       page = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!page)
+       page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
+       if (IS_ERR(page)) {
+               r = PTR_ERR(page);
                goto out;
-       r = -EFAULT;
-       if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
-               goto out_free;
+       }
        if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
                goto out_free;
        r = 0;
@@ -1652,8 +1617,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         * which we perfectly emulate ;-). Any other value should be at least
         * reported, some guests depend on them.
         */
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
@@ -1665,8 +1628,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
@@ -1703,6 +1664,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_set_msr(vcpu, msr, data);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
@@ -1865,10 +1828,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
@@ -1979,6 +1938,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data = 0xbe702111;
                break;
        default:
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_get_msr(vcpu, msr, pdata);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
@@ -2037,15 +1998,12 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;
 
-       r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-       entries = kmalloc(size, GFP_KERNEL);
-       if (!entries)
+       entries = memdup_user(user_msrs->entries, size);
+       if (IS_ERR(entries)) {
+               r = PTR_ERR(entries);
                goto out;
-
-       r = -EFAULT;
-       if (copy_from_user(entries, user_msrs->entries, size))
-               goto out_free;
+       }
 
        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
@@ -2265,466 +2223,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       unsigned long long efer = 0;
-
-       rdmsrl_safe(MSR_EFER, &efer);
-       return efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
-               entry->edx &= ~(1 << 20);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
-/* when an old userspace process fills a new kernel module */
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-                                   struct kvm_cpuid *cpuid,
-                                   struct kvm_cpuid_entry __user *entries)
-{
-       int r, i;
-       struct kvm_cpuid_entry *cpuid_entries;
-
-       r = -E2BIG;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               goto out;
-       r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-               goto out_free;
-       for (i = 0; i < cpuid->nent; i++) {
-               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
-               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
-               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
-               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
-               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
-               vcpu->arch.cpuid_entries[i].index = 0;
-               vcpu->arch.cpuid_entries[i].flags = 0;
-               vcpu->arch.cpuid_entries[i].padding[0] = 0;
-               vcpu->arch.cpuid_entries[i].padding[1] = 0;
-               vcpu->arch.cpuid_entries[i].padding[2] = 0;
-       }
-       vcpu->arch.cpuid_nent = cpuid->nent;
-       cpuid_fix_nx_cap(vcpu);
-       r = 0;
-       kvm_apic_set_version(vcpu);
-       kvm_x86_ops->cpuid_update(vcpu);
-       update_cpuid(vcpu);
-
-out_free:
-       vfree(cpuid_entries);
-out:
-       return r;
-}
-
-static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
-                                    struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       int r;
-
-       r = -E2BIG;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out;
-       vcpu->arch.cpuid_nent = cpuid->nent;
-       kvm_apic_set_version(vcpu);
-       kvm_x86_ops->cpuid_update(vcpu);
-       update_cpuid(vcpu);
-       return 0;
-
-out:
-       return r;
-}
-
-static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
-                                    struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       int r;
-
-       r = -E2BIG;
-       if (cpuid->nent < vcpu->arch.cpuid_nent)
-               goto out;
-       r = -EFAULT;
-       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
-                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out;
-       return 0;
-
-out:
-       cpuid->nent = vcpu->arch.cpuid_nent;
-       return r;
-}
-
-static void cpuid_mask(u32 *word, int wordnum)
-{
-       *word &= boot_cpu_data.x86_capability[wordnum];
-}
-
-static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-                          u32 index)
-{
-       entry->function = function;
-       entry->index = index;
-       cpuid_count(entry->function, entry->index,
-                   &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
-       entry->flags = 0;
-}
-
-static bool supported_xcr0_bit(unsigned bit)
-{
-       u64 mask = ((u64)1 << bit);
-
-       return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
-}
-
-#define F(x) bit(X86_FEATURE_##x)
-
-static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
-                        u32 index, int *nent, int maxnent)
-{
-       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-#ifdef CONFIG_X86_64
-       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
-                               ? F(GBPAGES) : 0;
-       unsigned f_lm = F(LM);
-#else
-       unsigned f_gbpages = 0;
-       unsigned f_lm = 0;
-#endif
-       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
-
-       /* cpuid 1.edx */
-       const u32 kvm_supported_word0_x86_features =
-               F(FPU) | F(VME) | F(DE) | F(PSE) |
-               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
-               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
-               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
-               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
-               0 /* Reserved, DS, ACPI */ | F(MMX) |
-               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
-               0 /* HTT, TM, Reserved, PBE */;
-       /* cpuid 0x80000001.edx */
-       const u32 kvm_supported_word1_x86_features =
-               F(FPU) | F(VME) | F(DE) | F(PSE) |
-               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
-               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
-               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
-               F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
-               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
-               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
-       /* cpuid 1.ecx */
-       const u32 kvm_supported_word4_x86_features =
-               F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
-               0 /* DS-CPL, VMX, SMX, EST */ |
-               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
-               0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
-               0 /* Reserved, DCA */ | F(XMM4_1) |
-               F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
-               F(F16C) | F(RDRAND);
-       /* cpuid 0x80000001.ecx */
-       const u32 kvm_supported_word6_x86_features =
-               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
-               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
-               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
-
-       /* cpuid 0xC0000001.edx */
-       const u32 kvm_supported_word5_x86_features =
-               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
-               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
-               F(PMM) | F(PMM_EN);
-
-       /* cpuid 7.0.ebx */
-       const u32 kvm_supported_word9_x86_features =
-               F(SMEP) | F(FSGSBASE) | F(ERMS);
-
-       /* all calls to cpuid_count() should be made on the same cpu */
-       get_cpu();
-       do_cpuid_1_ent(entry, function, index);
-       ++*nent;
-
-       switch (function) {
-       case 0:
-               entry->eax = min(entry->eax, (u32)0xd);
-               break;
-       case 1:
-               entry->edx &= kvm_supported_word0_x86_features;
-               cpuid_mask(&entry->edx, 0);
-               entry->ecx &= kvm_supported_word4_x86_features;
-               cpuid_mask(&entry->ecx, 4);
-               /* we support x2apic emulation even if host does not support
-                * it since we emulate x2apic in software */
-               entry->ecx |= F(X2APIC);
-               break;
-       /* function 2 entries are STATEFUL. That is, repeated cpuid commands
-        * may return different values. This forces us to get_cpu() before
-        * issuing the first command, and also to emulate this annoying behavior
-        * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
-       case 2: {
-               int t, times = entry->eax & 0xff;
-
-               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
-               entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-               for (t = 1; t < times && *nent < maxnent; ++t) {
-                       do_cpuid_1_ent(&entry[t], function, 0);
-                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
-                       ++*nent;
-               }
-               break;
-       }
-       /* function 4 has additional index. */
-       case 4: {
-               int i, cache_type;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* read more entries until cache_type is zero */
-               for (i = 1; *nent < maxnent; ++i) {
-                       cache_type = entry[i - 1].eax & 0x1f;
-                       if (!cache_type)
-                               break;
-                       do_cpuid_1_ent(&entry[i], function, i);
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-               }
-               break;
-       }
-       case 7: {
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* Mask ebx against host capbability word 9 */
-               if (index == 0) {
-                       entry->ebx &= kvm_supported_word9_x86_features;
-                       cpuid_mask(&entry->ebx, 9);
-               } else
-                       entry->ebx = 0;
-               entry->eax = 0;
-               entry->ecx = 0;
-               entry->edx = 0;
-               break;
-       }
-       case 9:
-               break;
-       /* function 0xb has additional index. */
-       case 0xb: {
-               int i, level_type;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               /* read more entries until level_type is zero */
-               for (i = 1; *nent < maxnent; ++i) {
-                       level_type = entry[i - 1].ecx & 0xff00;
-                       if (!level_type)
-                               break;
-                       do_cpuid_1_ent(&entry[i], function, i);
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-               }
-               break;
-       }
-       case 0xd: {
-               int idx, i;
-
-               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-               for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
-                       do_cpuid_1_ent(&entry[i], function, idx);
-                       if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
-                               continue;
-                       entry[i].flags |=
-                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                       ++*nent;
-                       ++i;
-               }
-               break;
-       }
-       case KVM_CPUID_SIGNATURE: {
-               char signature[12] = "KVMKVMKVM\0\0";
-               u32 *sigptr = (u32 *)signature;
-               entry->eax = 0;
-               entry->ebx = sigptr[0];
-               entry->ecx = sigptr[1];
-               entry->edx = sigptr[2];
-               break;
-       }
-       case KVM_CPUID_FEATURES:
-               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
-                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
-                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
-                            (1 << KVM_FEATURE_ASYNC_PF) |
-                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
-
-               if (sched_info_on())
-                       entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
-
-               entry->ebx = 0;
-               entry->ecx = 0;
-               entry->edx = 0;
-               break;
-       case 0x80000000:
-               entry->eax = min(entry->eax, 0x8000001a);
-               break;
-       case 0x80000001:
-               entry->edx &= kvm_supported_word1_x86_features;
-               cpuid_mask(&entry->edx, 1);
-               entry->ecx &= kvm_supported_word6_x86_features;
-               cpuid_mask(&entry->ecx, 6);
-               break;
-       case 0x80000008: {
-               unsigned g_phys_as = (entry->eax >> 16) & 0xff;
-               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
-               unsigned phys_as = entry->eax & 0xff;
-
-               if (!g_phys_as)
-                       g_phys_as = phys_as;
-               entry->eax = g_phys_as | (virt_as << 8);
-               entry->ebx = entry->edx = 0;
-               break;
-       }
-       case 0x80000019:
-               entry->ecx = entry->edx = 0;
-               break;
-       case 0x8000001a:
-               break;
-       case 0x8000001d:
-               break;
-       /*Add support for Centaur's CPUID instruction*/
-       case 0xC0000000:
-               /*Just support up to 0xC0000004 now*/
-               entry->eax = min(entry->eax, 0xC0000004);
-               break;
-       case 0xC0000001:
-               entry->edx &= kvm_supported_word5_x86_features;
-               cpuid_mask(&entry->edx, 5);
-               break;
-       case 3: /* Processor serial number */
-       case 5: /* MONITOR/MWAIT */
-       case 6: /* Thermal management */
-       case 0xA: /* Architectural Performance Monitoring */
-       case 0x80000007: /* Advanced power management */
-       case 0xC0000002:
-       case 0xC0000003:
-       case 0xC0000004:
-       default:
-               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
-               break;
-       }
-
-       kvm_x86_ops->set_supported_cpuid(function, entry);
-
-       put_cpu();
-}
-
-#undef F
-
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
-                                    struct kvm_cpuid_entry2 __user *entries)
-{
-       struct kvm_cpuid_entry2 *cpuid_entries;
-       int limit, nent = 0, r = -E2BIG;
-       u32 func;
-
-       if (cpuid->nent < 1)
-               goto out;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
-       r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-
-       do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
-       limit = cpuid_entries[0].eax;
-       for (func = 1; func <= limit && nent < cpuid->nent; ++func)
-               do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                            &nent, cpuid->nent);
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
-       limit = cpuid_entries[nent - 1].eax;
-       for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
-               do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                            &nent, cpuid->nent);
-
-
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       /* Add support for Centaur's CPUID instruction. */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
-               do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
-                               &nent, cpuid->nent);
-
-               r = -E2BIG;
-               if (nent >= cpuid->nent)
-                       goto out_free;
-
-               limit = cpuid_entries[nent - 1].eax;
-               for (func = 0xC0000001;
-                       func <= limit && nent < cpuid->nent; ++func)
-                       do_cpuid_ent(&cpuid_entries[nent], func, 0,
-                                       &nent, cpuid->nent);
-
-               r = -E2BIG;
-               if (nent >= cpuid->nent)
-                       goto out_free;
-       }
-
-       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
-                    cpuid->nent);
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
-                    cpuid->nent);
-
-       r = -E2BIG;
-       if (nent >= cpuid->nent)
-               goto out_free;
-
-       r = -EFAULT;
-       if (copy_to_user(entries, cpuid_entries,
-                        nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out_free;
-       cpuid->nent = nent;
-       r = 0;
-
-out_free:
-       vfree(cpuid_entries);
-out:
-       return r;
-}
-
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
@@ -3042,13 +2540,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
                if (!vcpu->arch.apic)
                        goto out;
-               u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.lapic)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
+               u.lapic = memdup_user(argp, sizeof(*u.lapic));
+               if (IS_ERR(u.lapic)) {
+                       r = PTR_ERR(u.lapic);
                        goto out;
+               }
+
                r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
                if (r)
                        goto out;
@@ -3227,14 +2724,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
        case KVM_SET_XSAVE: {
-               u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.xsave)
-                       break;
-
-               r = -EFAULT;
-               if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
-                       break;
+               u.xsave = memdup_user(argp, sizeof(*u.xsave));
+               if (IS_ERR(u.xsave)) {
+                       r = PTR_ERR(u.xsave);
+                       goto out;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
                break;
@@ -3255,15 +2749,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
        case KVM_SET_XCRS: {
-               u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!u.xcrs)
-                       break;
-
-               r = -EFAULT;
-               if (copy_from_user(u.xcrs, argp,
-                                  sizeof(struct kvm_xcrs)))
-                       break;
+               u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
+               if (IS_ERR(u.xcrs)) {
+                       r = PTR_ERR(u.xcrs);
+                       goto out;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
                break;
@@ -3460,16 +2950,59 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
        return 0;
 }
 
+/**
+ * write_protect_slot - write protect a slot for dirty logging
+ * @kvm: the kvm instance
+ * @memslot: the slot we protect
+ * @dirty_bitmap: the bitmap indicating which pages are dirty
+ * @nr_dirty_pages: the number of dirty pages
+ *
+ * We have two ways to find all sptes to protect:
+ * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
+ *    checks ones that have a spte mapping a page in the slot.
+ * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ *
+ * Generally speaking, if there are not so many dirty pages compared to the
+ * number of shadow pages, we should use the latter.
+ *
+ * Note that letting others write into a page marked dirty in the old bitmap
+ * by using the remaining tlb entry is not a problem.  That page will become
+ * write protected again when we flush the tlb and then be reported dirty to
+ * the user space by copying the old bitmap.
+ */
+static void write_protect_slot(struct kvm *kvm,
+                              struct kvm_memory_slot *memslot,
+                              unsigned long *dirty_bitmap,
+                              unsigned long nr_dirty_pages)
+{
+       /* Not many dirty pages compared to # of shadow pages. */
+       if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
+               unsigned long gfn_offset;
+
+               for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
+                       unsigned long gfn = memslot->base_gfn + gfn_offset;
+
+                       spin_lock(&kvm->mmu_lock);
+                       kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
+                       spin_unlock(&kvm->mmu_lock);
+               }
+               kvm_flush_remote_tlbs(kvm);
+       } else {
+               spin_lock(&kvm->mmu_lock);
+               kvm_mmu_slot_remove_write_access(kvm, memslot->id);
+               spin_unlock(&kvm->mmu_lock);
+       }
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r, i;
+       int r;
        struct kvm_memory_slot *memslot;
-       unsigned long n;
-       unsigned long is_dirty = 0;
+       unsigned long n, nr_dirty_pages;
 
        mutex_lock(&kvm->slots_lock);
 
@@ -3477,43 +3010,41 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
 
        n = kvm_dirty_bitmap_bytes(memslot);
-
-       for (i = 0; !is_dirty && i < n/sizeof(long); i++)
-               is_dirty = memslot->dirty_bitmap[i];
+       nr_dirty_pages = memslot->nr_dirty_pages;
 
        /* If nothing is dirty, don't bother messing with page tables. */
-       if (is_dirty) {
+       if (nr_dirty_pages) {
                struct kvm_memslots *slots, *old_slots;
-               unsigned long *dirty_bitmap;
+               unsigned long *dirty_bitmap, *dirty_bitmap_head;
 
-               dirty_bitmap = memslot->dirty_bitmap_head;
-               if (memslot->dirty_bitmap == dirty_bitmap)
-                       dirty_bitmap += n / sizeof(long);
-               memset(dirty_bitmap, 0, n);
+               dirty_bitmap = memslot->dirty_bitmap;
+               dirty_bitmap_head = memslot->dirty_bitmap_head;
+               if (dirty_bitmap == dirty_bitmap_head)
+                       dirty_bitmap_head += n / sizeof(long);
+               memset(dirty_bitmap_head, 0, n);
 
                r = -ENOMEM;
-               slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+               slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
                if (!slots)
                        goto out;
-               memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-               slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
-               slots->generation++;
+
+               memslot = id_to_memslot(slots, log->slot);
+               memslot->nr_dirty_pages = 0;
+               memslot->dirty_bitmap = dirty_bitmap_head;
+               update_memslots(slots, NULL);
 
                old_slots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
                synchronize_srcu_expedited(&kvm->srcu);
-               dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
                kfree(old_slots);
 
-               spin_lock(&kvm->mmu_lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->mmu_lock);
+               write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
 
                r = -EFAULT;
                if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
@@ -3658,14 +3189,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+               struct kvm_irqchip *chip;
 
-               r = -ENOMEM;
-               if (!chip)
+               chip = memdup_user(argp, sizeof(*chip));
+               if (IS_ERR(chip)) {
+                       r = PTR_ERR(chip);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(chip, argp, sizeof *chip))
-                       goto get_irqchip_out;
+               }
+
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto get_irqchip_out;
@@ -3684,14 +3215,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+               struct kvm_irqchip *chip;
 
-               r = -ENOMEM;
-               if (!chip)
+               chip = memdup_user(argp, sizeof(*chip));
+               if (IS_ERR(chip)) {
+                       r = PTR_ERR(chip);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(chip, argp, sizeof *chip))
-                       goto set_irqchip_out;
+               }
+
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto set_irqchip_out;
@@ -3898,12 +3429,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
        kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
-{
-       return gpa;
-}
-
-static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
        gpa_t t_gpa;
        struct x86_exception exception;
@@ -4087,7 +3613,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
        if (ret < 0)
                return 0;
-       kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
+       kvm_mmu_pte_write(vcpu, gpa, val, bytes);
        return 1;
 }
 
@@ -4324,7 +3850,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
-       kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+       kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
 
@@ -4349,32 +3875,24 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
        return r;
 }
 
-
-static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
-                                   int size, unsigned short port, void *val,
-                                   unsigned int count)
+static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+                              unsigned short port, void *val,
+                              unsigned int count, bool in)
 {
-       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-       if (vcpu->arch.pio.count)
-               goto data_avail;
-
-       trace_kvm_pio(0, port, size, count);
+       trace_kvm_pio(!in, port, size, count);
 
        vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = 1;
+       vcpu->arch.pio.in = in;
        vcpu->arch.pio.count  = count;
        vcpu->arch.pio.size = size;
 
        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-       data_avail:
-               memcpy(val, vcpu->arch.pio_data, size * count);
                vcpu->arch.pio.count = 0;
                return 1;
        }
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = KVM_EXIT_IO_IN;
+       vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = count;
@@ -4383,36 +3901,37 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
        return 0;
 }
 
-static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
-                                    int size, unsigned short port,
-                                    const void *val, unsigned int count)
+static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+                                   int size, unsigned short port, void *val,
+                                   unsigned int count)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       int ret;
 
-       trace_kvm_pio(1, port, size, count);
-
-       vcpu->arch.pio.port = port;
-       vcpu->arch.pio.in = 0;
-       vcpu->arch.pio.count = count;
-       vcpu->arch.pio.size = size;
-
-       memcpy(vcpu->arch.pio_data, val, size * count);
+       if (vcpu->arch.pio.count)
+               goto data_avail;
 
-       if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+       ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+       if (ret) {
+data_avail:
+               memcpy(val, vcpu->arch.pio_data, size * count);
                vcpu->arch.pio.count = 0;
                return 1;
        }
 
-       vcpu->run->exit_reason = KVM_EXIT_IO;
-       vcpu->run->io.direction = KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = size;
-       vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = count;
-       vcpu->run->io.port = port;
-
        return 0;
 }
 
+static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+                                    int size, unsigned short port,
+                                    const void *val, unsigned int count)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       memcpy(vcpu->arch.pio_data, val, size * count);
+       return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+}
+
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        return kvm_x86_ops->get_segment_base(vcpu, seg);
@@ -4627,6 +4146,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
 }
 
+static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
+                            u32 pmc, u64 *pdata)
+{
+       return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+}
+
 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
 {
        emul_to_vcpu(ctxt)->arch.halt_request = 1;
@@ -4679,6 +4204,7 @@ static struct x86_emulate_ops emulate_ops = {
        .set_dr              = emulator_set_dr,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
+       .read_pmc            = emulator_read_pmc,
        .halt                = emulator_halt,
        .wbinvd              = emulator_wbinvd,
        .fix_hypercall       = emulator_fix_hypercall,
@@ -4836,6 +4362,50 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
        return false;
 }
 
+static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
+                             unsigned long cr2,  int emulation_type)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+
+       last_retry_eip = vcpu->arch.last_retry_eip;
+       last_retry_addr = vcpu->arch.last_retry_addr;
+
+       /*
+        * If the emulation is caused by #PF and it is non-page_table
+        * writing instruction, it means the VM-EXIT is caused by shadow
+        * page protected, we can zap the shadow page and retry this
+        * instruction directly.
+        *
+        * Note: if the guest uses a non-page-table modifying instruction
+        * on the PDE that points to the instruction, then we will unmap
+        * the instruction and go to an infinite loop. So, we cache the
+        * last retried eip and the last fault address, if we meet the eip
+        * and the address again, we can break out of the potential infinite
+        * loop.
+        */
+       vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+
+       if (!(emulation_type & EMULTYPE_RETRY))
+               return false;
+
+       if (x86_page_table_writing_insn(ctxt))
+               return false;
+
+       if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+               return false;
+
+       vcpu->arch.last_retry_eip = ctxt->eip;
+       vcpu->arch.last_retry_addr = cr2;
+
+       if (!vcpu->arch.mmu.direct_map)
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+       kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+       return true;
+}
+
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                            unsigned long cr2,
                            int emulation_type,
@@ -4877,6 +4447,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DONE;
        }
 
+       if (retry_instruction(ctxt, cr2, emulation_type))
+               return EMULATE_DONE;
+
        /* this is needed for vmware backdoor interface to work since it
           changes registers values  during IO operation */
        if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
@@ -5095,17 +4668,17 @@ static void kvm_timer_init(void)
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
 {
-       return percpu_read(current_vcpu) != NULL;
+       return __this_cpu_read(current_vcpu) != NULL;
 }
 
 static int kvm_is_user_mode(void)
 {
        int user_mode = 3;
 
-       if (percpu_read(current_vcpu))
-               user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+       if (__this_cpu_read(current_vcpu))
+               user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
 
        return user_mode != 0;
 }
@@ -5114,8 +4687,8 @@ static unsigned long kvm_get_guest_ip(void)
 {
        unsigned long ip = 0;
 
-       if (percpu_read(current_vcpu))
-               ip = kvm_rip_read(percpu_read(current_vcpu));
+       if (__this_cpu_read(current_vcpu))
+               ip = kvm_rip_read(__this_cpu_read(current_vcpu));
 
        return ip;
 }
@@ -5128,13 +4701,13 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
 {
-       percpu_write(current_vcpu, vcpu);
+       __this_cpu_write(current_vcpu, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
 
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
 {
-       percpu_write(current_vcpu, NULL);
+       __this_cpu_write(current_vcpu, NULL);
 }
 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
 
@@ -5233,15 +4806,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
-static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
-                          unsigned long a1)
-{
-       if (is_long_mode(vcpu))
-               return a0;
-       else
-               return a0 | ((gpa_t)a1 << 32);
-}
-
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
        u64 param, ingpa, outgpa, ret;
@@ -5337,9 +4901,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        case KVM_HC_VAPIC_POLL_IRQ:
                ret = 0;
                break;
-       case KVM_HC_MMU_OP:
-               r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
-               break;
        default:
                ret = -KVM_ENOSYS;
                break;
@@ -5369,125 +4930,6 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
        return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
-{
-       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-       int j, nent = vcpu->arch.cpuid_nent;
-
-       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
-       /* when no next entry is found, the current entry[i] is reselected */
-       for (j = i + 1; ; j = (j + 1) % nent) {
-               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-               if (ej->function == e->function) {
-                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-                       return j;
-               }
-       }
-       return 0; /* silence gcc, even though control never reaches here */
-}
-
-/* find an entry with matching function, matching index (if needed), and that
- * should be read next (if it's stateful) */
-static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
-       u32 function, u32 index)
-{
-       if (e->function != function)
-               return 0;
-       if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
-               return 0;
-       if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
-           !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
-               return 0;
-       return 1;
-}
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index)
-{
-       int i;
-       struct kvm_cpuid_entry2 *best = NULL;
-
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               struct kvm_cpuid_entry2 *e;
-
-               e = &vcpu->arch.cpuid_entries[i];
-               if (is_matching_cpuid_entry(e, function, index)) {
-                       if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
-                               move_to_next_stateful_cpuid_entry(vcpu, i);
-                       best = e;
-                       break;
-               }
-       }
-       return best;
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
-       if (!best || best->eax < 0x80000008)
-               goto not_found;
-       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-       if (best)
-               return best->eax & 0xff;
-not_found:
-       return 36;
-}
-
-/*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
- */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
-                                                  u32 function, u32 index)
-{
-       struct kvm_cpuid_entry2 *maxlevel;
-
-       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
-       if (!maxlevel || maxlevel->eax >= function)
-               return NULL;
-       if (function & 0x80000000) {
-               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
-               if (!maxlevel)
-                       return NULL;
-       }
-       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
-}
-
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
-{
-       u32 function, index;
-       struct kvm_cpuid_entry2 *best;
-
-       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
-       best = kvm_find_cpuid_entry(vcpu, function, index);
-
-       if (!best)
-               best = check_cpuid_limit(vcpu, function, index);
-
-       if (best) {
-               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
-               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
-               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
-               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
-       }
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       trace_kvm_cpuid(function,
-                       kvm_register_read(vcpu, VCPU_REGS_RAX),
-                       kvm_register_read(vcpu, VCPU_REGS_RBX),
-                       kvm_register_read(vcpu, VCPU_REGS_RCX),
-                       kvm_register_read(vcpu, VCPU_REGS_RDX));
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
-
 /*
  * Check if userspace requested an interrupt window, and that the
  * interrupt window is open.
@@ -5648,6 +5090,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        int r;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
+       bool req_immediate_exit = 0;
 
        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5687,7 +5130,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        record_steal_time(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
-
+               req_immediate_exit =
+                       kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+               if (kvm_check_request(KVM_REQ_PMU, vcpu))
+                       kvm_handle_pmu_event(vcpu);
+               if (kvm_check_request(KVM_REQ_PMI, vcpu))
+                       kvm_deliver_pmi(vcpu);
        }
 
        r = kvm_mmu_reload(vcpu);
@@ -5738,6 +5186,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
+       if (req_immediate_exit)
+               smp_send_reschedule(vcpu->cpu);
+
        kvm_guest_enter();
 
        if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -5943,10 +5394,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (r <= 0)
                goto out;
 
-       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
-               kvm_register_write(vcpu, VCPU_REGS_RAX,
-                                    kvm_run->hypercall.ret);
-
        r = __vcpu_run(vcpu);
 
 out:
@@ -6148,7 +5595,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (sregs->cr4 & X86_CR4_OSXSAVE)
-               update_cpuid(vcpu);
+               kvm_update_cpuid(vcpu);
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
@@ -6425,6 +5872,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
+       kvm_pmu_reset(vcpu);
+
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -6473,10 +5922,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm = vcpu->kvm;
 
        vcpu->arch.emulate_ctxt.ops = &emulate_ops;
-       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
-       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-       vcpu->arch.mmu.translate_gpa = translate_gpa;
-       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
        if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
@@ -6513,6 +5958,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_mce_banks;
 
        kvm_async_pf_hash_reset(vcpu);
+       kvm_pmu_init(vcpu);
 
        return 0;
 fail_free_mce_banks:
@@ -6531,6 +5977,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        int idx;
 
+       kvm_pmu_destroy(vcpu);
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
index d36fe237c6659ff3445772cce4a505499173b4e8..cb80c293cdd8ea1b26dbbaa050c1c5c43b1fd1ab 100644 (file)
@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
 }
 
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index);
-
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
+extern u64 host_xcr0;
+
 #endif
index 26c731a106afd11b8db6a37577a84b36a221aff0..fdce49c7aff6cf2ce931f25154d8e7c0ada8338c 100644 (file)
@@ -29,7 +29,8 @@ config XEN_PVHVM
 
 config XEN_MAX_DOMAIN_MEMORY
        int
-       default 128
+       default 500 if X86_64
+       default 64 if X86_32
        depends on XEN
        help
          This only affects the sizing of some bss arrays, the unused
@@ -48,3 +49,4 @@ config XEN_DEBUG_FS
        help
          Enable statistics output and various tuning options in debugfs.
          Enabling this option may incur a significant performance overhead.
+
index 5a40d24ba3316b85b42e5ef5d00280b89a2033b9..3a5f55d51907aa16a8a58acfce22436f56366951 100644 (file)
@@ -54,6 +54,20 @@ static int map_pte_fn(pte_t *pte, struct page *pmd_page,
        return 0;
 }
 
+/*
+ * This function is used to map shared frames to store grant status. It is
+ * different from map_pte_fn above, the frames type here is uint64_t.
+ */
+static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
+                            unsigned long addr, void *data)
+{
+       uint64_t **frames = (uint64_t **)data;
+
+       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+       (*frames)++;
+       return 0;
+}
+
 static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
 {
@@ -64,10 +78,10 @@ static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
 
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
-                          struct grant_entry **__shared)
+                          void **__shared)
 {
        int rc;
-       struct grant_entry *shared = *__shared;
+       void *shared = *__shared;
 
        if (shared == NULL) {
                struct vm_struct *area =
@@ -83,8 +97,30 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
        return rc;
 }
 
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
-                             unsigned long nr_gframes)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          grant_status_t **__shared)
+{
+       int rc;
+       grant_status_t *shared = *__shared;
+
+       if (shared == NULL) {
+               /* No need to pass in PTE as we are going to do it
+                * in apply_to_page_range anyhow. */
+               struct vm_struct *area =
+                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
+               BUG_ON(area == NULL);
+               shared = area->addr;
+               *__shared = shared;
+       }
+
+       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+                                PAGE_SIZE * nr_gframes,
+                                map_pte_fn_status, &frames);
+       return rc;
+}
+
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
        apply_to_page_range(&init_mm, (unsigned long)shared,
                            PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
index f4bf8aa574f432aed85bea73fed2c0620c040e5b..58a0e46c404dc417c9af3cf9a08856096ebc99cf 100644 (file)
@@ -1852,7 +1852,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
        xen_write_cr3(__pa(initial_page_table));
 
        memblock_reserve(__pa(xen_start_info->pt_base),
-                        xen_start_info->nr_pt_frames * PAGE_SIZE));
+                        xen_start_info->nr_pt_frames * PAGE_SIZE);
 
        return initial_page_table;
 }
index 2672c798272fa1f419c98aeba0540fd39601f163..7aff6312ce7c75a66d731b5ea0a10743e7b34c5f 100644 (file)
@@ -596,6 +596,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
                if (ACPI_SUCCESS(status)) {
                        dev_info(root->bus->bridge,
                                "ACPI _OSC control (0x%02x) granted\n", flags);
+                       if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+                               /*
+                                * We have ASPM control, but the FADT indicates
+                                * that it's unsupported. Clear it.
+                                */
+                               pcie_clear_aspm(root->bus);
+                       }
                } else {
                        dev_info(root->bus->bridge,
                                "ACPI _OSC request failed (%s), "
index cf047c406d92797777d6255122f23b5a8297c58a..6bdedd7cca2cd3cd630370732b71b8b530992ca9 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF && OF_IRQ
+       depends on PATA_PLATFORM && OF
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index cf26222a93c5ddc7a88092ab9c7fcdf262a45ae1..d07bf0366d99ee06cc3d4b3df62d617e5ef7cc15 100644 (file)
@@ -52,7 +52,8 @@
 #define DRV_VERSION    "3.0"
 
 enum {
-       AHCI_PCI_BAR            = 5,
+       AHCI_PCI_BAR_STA2X11    = 0,
+       AHCI_PCI_BAR_STANDARD   = 5,
 };
 
 enum board_ids {
@@ -375,6 +376,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(SI, 0x1185), board_ahci },                /* SiS 968 */
        { PCI_VDEVICE(SI, 0x0186), board_ahci },                /* SiS 968 */
 
+       /* ST Microelectronics */
+       { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci },           /* ST ConneXt */
+
        /* Marvell */
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },        /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },        /* 6121 */
@@ -622,6 +626,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 {
        int rc;
 
+       /*
+        * If the device fixup already set the dma_mask to some non-standard
+        * value, don't extend it here. This happens on STA2X11, for example.
+        */
+       if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+               return 0;
+
        if (using_dac &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1026,6 +1037,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct ahci_host_priv *hpriv;
        struct ata_host *host;
        int n_ports, i, rc;
+       int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
 
        VPRINTK("ENTER\n");
 
@@ -1057,6 +1069,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev,
                         "PDC42819 can only drive SATA devices with this driver\n");
 
+       /* The Connext uses non-standard BAR */
+       if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+               ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+
        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
@@ -1065,7 +1081,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* AHCI controllers often implement SFF compatible interface.
         * Grab all PCI BARs just in case.
         */
-       rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+       rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
@@ -1108,7 +1124,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
                pci_intx(pdev, 1);
 
-       hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+       hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
@@ -1172,8 +1188,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
 
-               ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
-               ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+               ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+               ata_port_pbar_desc(ap, ahci_pci_bar,
                                   0x100 + ap->port_no * 0x80, "port");
 
                /* set enclosure management message type */
index 43b875810d1b7b91c98386fb6f3a5c58be79fdb9..48be4e189163d034d3de2d0dae366db7e252bea7 100644 (file)
@@ -202,6 +202,71 @@ static int __devexit ahci_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int ahci_suspend(struct device *dev)
+{
+       struct ahci_platform_data *pdata = dev_get_platdata(dev);
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *mmio = hpriv->mmio;
+       u32 ctl;
+       int rc;
+
+       if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+               dev_err(dev, "firmware update required for suspend/resume\n");
+               return -EIO;
+       }
+
+       /*
+        * AHCI spec rev1.1 section 8.3.3:
+        * Software must disable interrupts prior to requesting a
+        * transition of the HBA to D3 state.
+        */
+       ctl = readl(mmio + HOST_CTL);
+       ctl &= ~HOST_IRQ_EN;
+       writel(ctl, mmio + HOST_CTL);
+       readl(mmio + HOST_CTL); /* flush */
+
+       rc = ata_host_suspend(host, PMSG_SUSPEND);
+       if (rc)
+               return rc;
+
+       if (pdata && pdata->suspend)
+               return pdata->suspend(dev);
+       return 0;
+}
+
+static int ahci_resume(struct device *dev)
+{
+       struct ahci_platform_data *pdata = dev_get_platdata(dev);
+       struct ata_host *host = dev_get_drvdata(dev);
+       int rc;
+
+       if (pdata && pdata->resume) {
+               rc = pdata->resume(dev);
+               if (rc)
+                       return rc;
+       }
+
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+               rc = ahci_reset_controller(host);
+               if (rc)
+                       return rc;
+
+               ahci_init_controller(host);
+       }
+
+       ata_host_resume(host);
+
+       return 0;
+}
+
+static struct dev_pm_ops ahci_pm_ops = {
+       .suspend                = &ahci_suspend,
+       .resume                 = &ahci_resume,
+};
+#endif
+
 static const struct of_device_id ahci_of_match[] = {
        { .compatible = "calxeda,hb-ahci", },
        {},
@@ -214,6 +279,9 @@ static struct platform_driver ahci_driver = {
                .name = "ahci",
                .owner = THIS_MODULE,
                .of_match_table = ahci_of_match,
+#ifdef CONFIG_PM
+               .pm = &ahci_pm_ops,
+#endif
        },
        .id_table       = ahci_devtype,
 };
index 3c92dbd751e0d287e2c124aa28e828ef46ad1858..a72bfd0ecfeebd801143cb0f9767cb9c496b252b 100644 (file)
@@ -746,9 +746,6 @@ static void ahci_start_port(struct ata_port *ap)
        /* enable FIS reception */
        ahci_start_fis_rx(ap);
 
-       /* enable DMA */
-       ahci_start_engine(ap);
-
        /* turn on LEDs */
        if (ap->flags & ATA_FLAG_EM) {
                ata_for_each_link(link, ap, EDGE) {
@@ -2022,7 +2019,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
                ahci_power_down(ap);
        else {
                ata_port_err(ap, "%s (%d)\n", emsg, rc);
-               ahci_start_port(ap);
+               ata_port_freeze(ap);
        }
 
        return rc;
index c04ad68cb602f20f734b25130e5dfbc7ac72bfd2..11c9aea4f4f7ea93a50fdc4b3547323d3728738d 100644 (file)
@@ -66,6 +66,7 @@
 #include <asm/byteorder.h>
 #include <linux/cdrom.h>
 #include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
 
 #include "libata.h"
 #include "libata-transport.h"
@@ -3248,10 +3249,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
                ata_force_xfermask(dev);
 
                pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
-               dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
 
                if (libata_dma_mask & mode_mask)
-                       dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+                       dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
+                                                    dev->udma_mask);
                else
                        dma_mask = 0;
 
@@ -5234,73 +5235,55 @@ bool ata_link_offline(struct ata_link *link)
 }
 
 #ifdef CONFIG_PM
-static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
                               unsigned int action, unsigned int ehi_flags,
                               int wait)
 {
+       struct ata_link *link;
        unsigned long flags;
-       int i, rc;
-
-       for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap = host->ports[i];
-               struct ata_link *link;
+       int rc;
 
-               /* Previous resume operation might still be in
-                * progress.  Wait for PM_PENDING to clear.
-                */
-               if (ap->pflags & ATA_PFLAG_PM_PENDING) {
-                       ata_port_wait_eh(ap);
-                       WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
-               }
+       /* Previous resume operation might still be in
+        * progress.  Wait for PM_PENDING to clear.
+        */
+       if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+               ata_port_wait_eh(ap);
+               WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+       }
 
-               /* request PM ops to EH */
-               spin_lock_irqsave(ap->lock, flags);
+       /* request PM ops to EH */
+       spin_lock_irqsave(ap->lock, flags);
 
-               ap->pm_mesg = mesg;
-               if (wait) {
-                       rc = 0;
-                       ap->pm_result = &rc;
-               }
+       ap->pm_mesg = mesg;
+       if (wait) {
+               rc = 0;
+               ap->pm_result = &rc;
+       }
 
-               ap->pflags |= ATA_PFLAG_PM_PENDING;
-               ata_for_each_link(link, ap, HOST_FIRST) {
-                       link->eh_info.action |= action;
-                       link->eh_info.flags |= ehi_flags;
-               }
+       ap->pflags |= ATA_PFLAG_PM_PENDING;
+       ata_for_each_link(link, ap, HOST_FIRST) {
+               link->eh_info.action |= action;
+               link->eh_info.flags |= ehi_flags;
+       }
 
-               ata_port_schedule_eh(ap);
+       ata_port_schedule_eh(ap);
 
-               spin_unlock_irqrestore(ap->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
-               /* wait and check result */
-               if (wait) {
-                       ata_port_wait_eh(ap);
-                       WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
-                       if (rc)
-                               return rc;
-               }
+       /* wait and check result */
+       if (wait) {
+               ata_port_wait_eh(ap);
+               WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
        }
 
-       return 0;
+       return rc;
 }
 
-/**
- *     ata_host_suspend - suspend host
- *     @host: host to suspend
- *     @mesg: PM message
- *
- *     Suspend @host.  Actual operation is performed by EH.  This
- *     function requests EH to perform PM operations and waits for EH
- *     to finish.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     0 on success, -errno on failure.
- */
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
 {
+       struct ata_port *ap = to_ata_port(dev);
        unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
@@ -5315,31 +5298,108 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
        if (mesg.event == PM_EVENT_SUSPEND)
                ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
 
-       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
-       if (rc == 0)
-               host->dev->power.power_state = mesg;
+       rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
        return rc;
 }
 
+static int ata_port_suspend(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return ata_port_suspend_common(dev, PMSG_SUSPEND);
+}
+
+static int ata_port_do_freeze(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               pm_runtime_resume(dev);
+
+       return ata_port_suspend_common(dev, PMSG_FREEZE);
+}
+
+static int ata_port_poweroff(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+}
+
+static int ata_port_resume_common(struct device *dev)
+{
+       struct ata_port *ap = to_ata_port(dev);
+       int rc;
+
+       rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+               ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+       return rc;
+}
+
+static int ata_port_resume(struct device *dev)
+{
+       int rc;
+
+       rc = ata_port_resume_common(dev);
+       if (!rc) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
+       return rc;
+}
+
+static int ata_port_runtime_idle(struct device *dev)
+{
+       return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops ata_port_pm_ops = {
+       .suspend = ata_port_suspend,
+       .resume = ata_port_resume,
+       .freeze = ata_port_do_freeze,
+       .thaw = ata_port_resume,
+       .poweroff = ata_port_poweroff,
+       .restore = ata_port_resume,
+
+       .runtime_suspend = ata_port_suspend,
+       .runtime_resume = ata_port_resume_common,
+       .runtime_idle = ata_port_runtime_idle,
+};
+
+/**
+ *     ata_host_suspend - suspend host
+ *     @host: host to suspend
+ *     @mesg: PM message
+ *
+ *     Suspend @host.  Actual operation is performed by port suspend.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+       host->dev->power.power_state = mesg;
+       return 0;
+}
+
 /**
  *     ata_host_resume - resume host
  *     @host: host to resume
  *
- *     Resume @host.  Actual operation is performed by EH.  This
- *     function requests EH to perform PM operations and returns.
- *     Note that all resume operations are performed parallelly.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
+ *     Resume @host.  Actual operation is performed by port resume.
  */
 void ata_host_resume(struct ata_host *host)
 {
-       ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
-                           ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
        host->dev->power.power_state = PMSG_ON;
 }
 #endif
 
+struct device_type ata_port_type = {
+       .name = "ata_port",
+#ifdef CONFIG_PM
+       .pm = &ata_port_pm_ops,
+#endif
+};
+
 /**
  *     ata_dev_init - Initialize an ata_device structure
  *     @dev: Device structure to initialize
index 2a5412e7e9c11c85d23c12959ad503f9f2e182aa..508a60bfe5c12655353eb2eed6c15690bdb293e5 100644 (file)
@@ -3381,6 +3381,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
                if (!shost)
                        goto err_alloc;
 
+               shost->eh_noresume = 1;
                *(struct ata_port **)&shost->hostdata[0] = ap;
                ap->scsi_host = shost;
 
@@ -3398,7 +3399,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
                 */
                shost->max_host_blocked = 1;
 
-               rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+               rc = scsi_add_host(ap->scsi_host, &ap->tdev);
                if (rc)
                        goto err_add;
        }
index 4cadfa28f940450ee2f5a890af44e34ad583e921..9691dd0966d7fb59a98e6a692a558a4cdd9c7de3 100644 (file)
@@ -929,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
        bytes = (bc_hi << 8) | bc_lo;
 
        /* shall be cleared to zero, indicating xfer of data */
-       if (unlikely(ireason & (1 << 0)))
+       if (unlikely(ireason & ATAPI_COD))
                goto atapi_check;
 
        /* make sure transfer direction matches expected */
-       i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+       i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
        if (unlikely(do_write != i_write))
                goto atapi_check;
 
index ce9dc6207f3731d292ca838a02adca44013ac4a1..9a7f0ea565df6c6066d7e64465c8385b81014a01 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/libata.h>
 #include <linux/hdreg.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 
 #include "libata.h"
 #include "libata-transport.h"
@@ -279,6 +280,7 @@ int ata_tport_add(struct device *parent,
        struct device *dev = &ap->tdev;
 
        device_initialize(dev);
+       dev->type = &ata_port_type;
 
        dev->parent = get_device(parent);
        dev->release = ata_tport_release;
@@ -289,6 +291,9 @@ int ata_tport_add(struct device *parent,
                goto tport_err;
        }
 
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
        transport_add_device(dev);
        transport_configure_device(dev);
 
index 773de97988a25143c897df8de7b751ead6e6b696..814486d35c4445b31eadf175c3a06ebceef68bce 100644 (file)
@@ -58,6 +58,7 @@ extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
 extern int libata_allow_tpm;
+extern struct device_type ata_port_type;
 extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
 extern void ata_force_cbl(struct ata_port *ap);
 extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
index e8574bba3ee4821daaadf3db70ee8402f6cd2cfd..048589fad2ca203ef529e7897a2d9d0e2bc858d5 100644 (file)
@@ -963,17 +963,7 @@ static struct platform_driver arasan_cf_driver = {
        },
 };
 
-static int __init arasan_cf_init(void)
-{
-       return platform_driver_register(&arasan_cf_driver);
-}
-module_init(arasan_cf_init);
-
-static void __exit arasan_cf_exit(void)
-{
-       platform_driver_unregister(&arasan_cf_driver);
-}
-module_exit(arasan_cf_exit);
+module_platform_driver(arasan_cf_driver);
 
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
index 5249e6d918a3831e0a899b6768e1a71695ebc2d3..a7d91a72ee352b45ca71cf1bd2ccb738890f1075 100644 (file)
@@ -454,20 +454,7 @@ static struct platform_driver pata_at91_driver = {
        },
 };
 
-static int __init pata_at91_init(void)
-{
-       return platform_driver_register(&pata_at91_driver);
-}
-
-static void __exit pata_at91_exit(void)
-{
-       platform_driver_unregister(&pata_at91_driver);
-}
-
-
-module_init(pata_at91_init);
-module_exit(pata_at91_exit);
-
+module_platform_driver(pata_at91_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
index bd987bb082eb51d4c23d937e23d5882f45e0cb2f..d6a4677fdf711801e4d09e74d6cc122db9a50904 100644 (file)
@@ -418,14 +418,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
                                        (tcyc_tdvs<<8 | tdvs));
                                ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
                                ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
-
-                               /* Enable host ATAPI Untra DMA interrupts */
-                               ATAPI_SET_INT_MASK(base,
-                                       ATAPI_GET_INT_MASK(base)
-                                       | UDMAIN_DONE_MASK
-                                       | UDMAOUT_DONE_MASK
-                                       | UDMAIN_TERM_MASK
-                                       | UDMAOUT_TERM_MASK);
                        }
                }
        }
@@ -470,10 +462,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
                        ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
                        ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
                        ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
-
-                       /* Enable host ATAPI Multi DMA interrupts */
-                       ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
-                               | MULTI_DONE_MASK | MULTI_TERM_MASK);
                        SSYNC();
                }
        }
@@ -1153,15 +1141,11 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
 {
        unsigned char host_stat = 0;
        void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-       unsigned short int_status = ATAPI_GET_INT_STATUS(base);
 
-       if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
+       if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
                host_stat |= ATA_DMA_ACTIVE;
-       if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
-               ATAPI_DEV_INT))
+       if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
                host_stat |= ATA_DMA_INTR;
-       if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
-               host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
 
        dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
 
index 628c8fae5937183f24b9a607c17f0765609a3a5e..7a402c75ab907243a5716d6748467453c0d49fe4 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * pata_cs5536.c       - CS5536 PATA for new ATA layer
  *                       (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ *                       (C) 2011 Bartlomiej Zolnierkiewicz
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -55,24 +56,16 @@ MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
 #define DRV_VERSION    "0.0.8"
 
 enum {
-       CFG                     = 0,
-       DTC                     = 1,
-       CAST                    = 2,
-       ETC                     = 3,
-
-       MSR_IDE_BASE            = 0x51300000,
-       MSR_IDE_CFG             = (MSR_IDE_BASE + 0x10),
-       MSR_IDE_DTC             = (MSR_IDE_BASE + 0x12),
-       MSR_IDE_CAST            = (MSR_IDE_BASE + 0x13),
-       MSR_IDE_ETC             = (MSR_IDE_BASE + 0x14),
-
+       MSR_IDE_CFG             = 0x51300010,
        PCI_IDE_CFG             = 0x40,
-       PCI_IDE_DTC             = 0x48,
-       PCI_IDE_CAST            = 0x4c,
-       PCI_IDE_ETC             = 0x50,
 
-       IDE_CFG_CHANEN          = 0x2,
-       IDE_CFG_CABLE           = 0x10000,
+       CFG                     = 0,
+       DTC                     = 2,
+       CAST                    = 3,
+       ETC                     = 4,
+
+       IDE_CFG_CHANEN          = (1 << 1),
+       IDE_CFG_CABLE           = (1 << 17) | (1 << 16),
 
        IDE_D0_SHIFT            = 24,
        IDE_D1_SHIFT            = 16,
@@ -84,45 +77,50 @@ enum {
        IDE_CAST_CMD_MASK       = 0xff,
        IDE_CAST_CMD_SHIFT      = 24,
 
-       IDE_ETC_NODMA           = 0x03,
-};
-
-static const u32 msr_reg[4] = {
-       MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
-};
-
-static const u8 pci_reg[4] = {
-       PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+       IDE_ETC_UDMA_MASK       = 0xc0,
 };
 
-static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
 {
        if (unlikely(use_msr)) {
                u32 dummy __maybe_unused;
 
-               rdmsr(msr_reg[reg], *val, dummy);
+               rdmsr(MSR_IDE_CFG + reg, *val, dummy);
                return 0;
        }
 
-       return pci_read_config_dword(pdev, pci_reg[reg], val);
+       return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
 }
 
-static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+static int cs5536_write(struct pci_dev *pdev, int reg, int val)
 {
        if (unlikely(use_msr)) {
-               wrmsr(msr_reg[reg], val, 0);
+               wrmsr(MSR_IDE_CFG + reg, val, 0);
                return 0;
        }
 
-       return pci_write_config_dword(pdev, pci_reg[reg], val);
+       return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
+
+static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
+{
+       struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
+       int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+       u32 dtc;
+
+       cs5536_read(pdev, DTC, &dtc);
+       dtc &= ~(IDE_DRV_MASK << dshift);
+       dtc |= tim << dshift;
+       cs5536_write(pdev, DTC, dtc);
 }
 
 /**
  *     cs5536_cable_detect     -       detect cable type
  *     @ap: Port to detect on
  *
- *     Perform cable detection for ATA66 capable cable. Return a libata
- *     cable type.
+ *     Perform cable detection for ATA66 capable cable.
+ *
+ *     Returns a cable type.
  */
 
 static int cs5536_cable_detect(struct ata_port *ap)
@@ -132,7 +130,7 @@ static int cs5536_cable_detect(struct ata_port *ap)
 
        cs5536_read(pdev, CFG, &cfg);
 
-       if (cfg & (IDE_CFG_CABLE << ap->port_no))
+       if (cfg & IDE_CFG_CABLE)
                return ATA_CBL_PATA80;
        else
                return ATA_CBL_PATA40;
@@ -162,19 +160,15 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
        struct ata_device *pair = ata_dev_pair(adev);
        int mode = adev->pio_mode - XFER_PIO_0;
        int cmdmode = mode;
-       int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
        int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
-       u32 dtc, cast, etc;
+       u32 cast;
 
        if (pair)
                cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
 
-       cs5536_read(pdev, DTC, &dtc);
-       cs5536_read(pdev, CAST, &cast);
-       cs5536_read(pdev, ETC, &etc);
+       cs5536_program_dtc(adev, drv_timings[mode]);
 
-       dtc &= ~(IDE_DRV_MASK << dshift);
-       dtc |= drv_timings[mode] << dshift;
+       cs5536_read(pdev, CAST, &cast);
 
        cast &= ~(IDE_CAST_DRV_MASK << cshift);
        cast |= addr_timings[mode] << cshift;
@@ -182,12 +176,7 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
        cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
        cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
 
-       etc &= ~(IDE_DRV_MASK << dshift);
-       etc |= IDE_ETC_NODMA << dshift;
-
-       cs5536_write(pdev, DTC, dtc);
        cs5536_write(pdev, CAST, cast);
-       cs5536_write(pdev, ETC, etc);
 }
 
 /**
@@ -208,25 +197,21 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
        };
 
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-       u32 dtc, etc;
+       u32 etc;
        int mode = adev->dma_mode;
        int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
 
-       if (mode >= XFER_UDMA_0) {
-               cs5536_read(pdev, ETC, &etc);
+       cs5536_read(pdev, ETC, &etc);
 
+       if (mode >= XFER_UDMA_0) {
                etc &= ~(IDE_DRV_MASK << dshift);
                etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
-
-               cs5536_write(pdev, ETC, etc);
        } else { /* MWDMA */
-               cs5536_read(pdev, DTC, &dtc);
-
-               dtc &= ~(IDE_DRV_MASK << dshift);
-               dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
-
-               cs5536_write(pdev, DTC, dtc);
+               etc &= ~(IDE_ETC_UDMA_MASK << dshift);
+               cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
        }
+
+       cs5536_write(pdev, ETC, etc);
 }
 
 static struct scsi_host_template cs5536_sht = {
index ca9d9caedfa3137fd32e287cce52f05ae70d8bee..c5af97f5107b5af2790b174aa219e7e176879321 100644 (file)
@@ -235,17 +235,7 @@ static struct platform_driver pata_imx_driver = {
        },
 };
 
-static int __init pata_imx_init(void)
-{
-       return platform_driver_register(&pata_imx_driver);
-}
-
-static void __exit pata_imx_exit(void)
-{
-       platform_driver_unregister(&pata_imx_driver);
-}
-module_init(pata_imx_init);
-module_exit(pata_imx_exit);
+module_platform_driver(pata_imx_driver);
 
 MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
 MODULE_DESCRIPTION("low-level driver for iMX PATA");
index 15b64311fe0a8bdf7423e33169d5eaff9833a46a..badb1789a918a21c027ee0d5b0b0610eb7bd2340 100644 (file)
@@ -205,21 +205,10 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
        .remove         = __devexit_p(ixp4xx_pata_remove),
 };
 
-static int __init ixp4xx_pata_init(void)
-{
-       return platform_driver_register(&ixp4xx_pata_platform_driver);
-}
-
-static void __exit ixp4xx_pata_exit(void)
-{
-       platform_driver_unregister(&ixp4xx_pata_platform_driver);
-}
+module_platform_driver(ixp4xx_pata_platform_driver);
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
 MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(ixp4xx_pata_init);
-module_exit(ixp4xx_pata_exit);
index 3e1746314f220b6747a929cd353e18dea24db91a..00748ae1a016ec9eaf55f93b36a1142fcb73c028 100644 (file)
@@ -897,26 +897,7 @@ static struct platform_driver mpc52xx_ata_of_platform_driver = {
        },
 };
 
-
-/* ======================================================================== */
-/* Module                                                                   */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_ata_init(void)
-{
-       printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
-       return platform_driver_register(&mpc52xx_ata_of_platform_driver);
-}
-
-static void __exit
-mpc52xx_ata_exit(void)
-{
-       platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
-}
-
-module_init(mpc52xx_ata_init);
-module_exit(mpc52xx_ata_exit);
+module_platform_driver(mpc52xx_ata_of_platform_driver);
 
 MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
 MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
index 2a472c5bb7db7015d76118434a8d50e7795a07d1..1654dc27e7f8365c104d5c6b1523d88c3927d266 100644 (file)
@@ -12,8 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 
 static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
@@ -22,7 +21,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
        struct device_node *dn = ofdev->dev.of_node;
        struct resource io_res;
        struct resource ctl_res;
-       struct resource irq_res;
+       struct resource *irq_res;
        unsigned int reg_shift = 0;
        int pio_mode = 0;
        int pio_mask;
@@ -51,11 +50,9 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
                }
        }
 
-       ret = of_irq_to_resource(dn, 0, &irq_res);
-       if (!ret)
-               irq_res.start = irq_res.end = 0;
-       else
-               irq_res.flags = 0;
+       irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+       if (irq_res)
+               irq_res->flags = 0;
 
        prop = of_get_property(dn, "reg-shift", NULL);
        if (prop)
@@ -75,7 +72,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
        pio_mask = 1 << pio_mode;
        pio_mask |= (1 << pio_mode) - 1;
 
-       return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
+       return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
                                     reg_shift, pio_mask);
 }
 
@@ -101,17 +98,7 @@ static struct platform_driver pata_of_platform_driver = {
        .remove         = __devexit_p(pata_of_platform_remove),
 };
 
-static int __init pata_of_platform_init(void)
-{
-       return platform_driver_register(&pata_of_platform_driver);
-}
-module_init(pata_of_platform_init);
-
-static void __exit pata_of_platform_exit(void)
-{
-       platform_driver_unregister(&pata_of_platform_driver);
-}
-module_exit(pata_of_platform_exit);
+module_platform_driver(pata_of_platform_driver);
 
 MODULE_DESCRIPTION("OF-platform PATA driver");
 MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
index b86d7e22595e6bd511c004e0aaf517b26a1bcf93..5ff31b68135c5c9a01c2b3166ff7f3cfb1a5cd71 100644 (file)
@@ -132,20 +132,9 @@ static struct platform_driver palmld_pata_platform_driver = {
        .remove         = __devexit_p(palmld_pata_remove),
 };
 
-static int __init palmld_pata_init(void)
-{
-       return platform_driver_register(&palmld_pata_platform_driver);
-}
-
-static void __exit palmld_pata_exit(void)
-{
-       platform_driver_unregister(&palmld_pata_platform_driver);
-}
+module_platform_driver(palmld_pata_platform_driver);
 
 MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
 MODULE_DESCRIPTION("PalmLD PATA driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(palmld_pata_init);
-module_exit(palmld_pata_exit);
index 2067308f683f65864a79926acfedca11195541b9..f1848aeda78398a5994428719d63aefa207afaa7 100644 (file)
@@ -256,17 +256,7 @@ static struct platform_driver pata_platform_driver = {
        },
 };
 
-static int __init pata_platform_init(void)
-{
-       return platform_driver_register(&pata_platform_driver);
-}
-
-static void __exit pata_platform_exit(void)
-{
-       platform_driver_unregister(&pata_platform_driver);
-}
-module_init(pata_platform_init);
-module_exit(pata_platform_exit);
+module_platform_driver(pata_platform_driver);
 
 module_param(pio_mask, int, 0);
 
index b4ede40f8ae15189c20300dd585e8045e54efb6c..0bb0fb7b26bc345ce84304461cf0185e5a4d5f32 100644 (file)
@@ -390,18 +390,7 @@ static struct platform_driver pxa_ata_driver = {
        },
 };
 
-static int __init pxa_ata_init(void)
-{
-       return platform_driver_register(&pxa_ata_driver);
-}
-
-static void __exit pxa_ata_exit(void)
-{
-       platform_driver_unregister(&pxa_ata_driver);
-}
-
-module_init(pxa_ata_init);
-module_exit(pxa_ata_exit);
+module_platform_driver(pxa_ata_driver);
 
 MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
 MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
index 1b9d10d9c5d912b8306b9cd73d30e5d521cfd6a7..9417101bd5ca4524eb4aefdd797e4f41fc6840ae 100644 (file)
@@ -188,9 +188,6 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
 static struct platform_driver rb532_pata_platform_driver = {
        .probe          = rb532_pata_driver_probe,
        .remove         = __devexit_p(rb532_pata_driver_remove),
@@ -200,27 +197,13 @@ static struct platform_driver rb532_pata_platform_driver = {
        },
 };
 
-/* ------------------------------------------------------------------------ */
-
 #define DRV_INFO DRV_DESC " version " DRV_VERSION
 
-static int __init rb532_pata_module_init(void)
-{
-       printk(KERN_INFO DRV_INFO "\n");
-
-       return platform_driver_register(&rb532_pata_platform_driver);
-}
-
-static void __exit rb532_pata_module_exit(void)
-{
-       platform_driver_unregister(&rb532_pata_platform_driver);
-}
+module_platform_driver(rb532_pata_platform_driver);
 
 MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
-
-module_init(rb532_pata_module_init);
-module_exit(rb532_pata_module_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
index 5c4237452f5072a97a82eca00e95d55c15ce2542..69f7cde49c6b987a4fb263f9d28d5d49169c8ff6 100644 (file)
@@ -1777,18 +1777,7 @@ static struct platform_driver sata_dwc_driver = {
        .remove = sata_dwc_remove,
 };
 
-static int __init sata_dwc_init(void)
-{
-       return platform_driver_register(&sata_dwc_driver);
-}
-
-static void __exit sata_dwc_exit(void)
-{
-       platform_driver_unregister(&sata_dwc_driver);
-}
-
-module_init(sata_dwc_init);
-module_exit(sata_dwc_exit);
+module_platform_driver(sata_dwc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
index 78ae7b67b09e7bdf79823fece7aa9fef0d01b720..5a2c95ba050a28caacbb4e3a799a0cdd176fcf47 100644 (file)
@@ -1452,21 +1452,9 @@ static struct platform_driver fsl_sata_driver = {
 #endif
 };
 
-static int __init sata_fsl_init(void)
-{
-       platform_driver_register(&fsl_sata_driver);
-       return 0;
-}
-
-static void __exit sata_fsl_exit(void)
-{
-       platform_driver_unregister(&fsl_sata_driver);
-}
+module_platform_driver(fsl_sata_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
 MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
 MODULE_VERSION("1.10");
-
-module_init(sata_fsl_init);
-module_exit(sata_fsl_exit);
index 8069322e4c9e8baa7aab978821e1d4e2f00a0f98..37c794d312644d30430446d32e3ed844565cc362 100644 (file)
@@ -787,17 +787,14 @@ static const struct xenbus_device_id xen_blkbk_ids[] = {
 };
 
 
-static struct xenbus_driver xen_blkbk = {
-       .name = "vbd",
-       .owner = THIS_MODULE,
-       .ids = xen_blkbk_ids,
+static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
        .probe = xen_blkbk_probe,
        .remove = xen_blkbk_remove,
        .otherend_changed = frontend_changed
-};
+);
 
 
 int xen_blkif_xenbus_init(void)
 {
-       return xenbus_register_backend(&xen_blkbk);
+       return xenbus_register_backend(&xen_blkbk_driver);
 }
index 7b2ec5908413da7b989044828879afa173613b87..9fd3ee203b1e7b5e450c3e32737e61a64f9cc6b1 100644 (file)
@@ -1437,16 +1437,13 @@ static const struct xenbus_device_id blkfront_ids[] = {
        { "" }
 };
 
-static struct xenbus_driver blkfront = {
-       .name = "vbd",
-       .owner = THIS_MODULE,
-       .ids = blkfront_ids,
+static DEFINE_XENBUS_DRIVER(blkfront, ,
        .probe = blkfront_probe,
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = blkback_changed,
        .is_ready = blkfront_is_ready,
-};
+);
 
 static int __init xlblk_init(void)
 {
@@ -1461,7 +1458,7 @@ static int __init xlblk_init(void)
                return -ENODEV;
        }
 
-       ret = xenbus_register_frontend(&blkfront);
+       ret = xenbus_register_frontend(&blkfront_driver);
        if (ret) {
                unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
                return ret;
@@ -1474,7 +1471,7 @@ module_init(xlblk_init);
 
 static void __exit xlblk_exit(void)
 {
-       return xenbus_unregister_driver(&blkfront);
+       return xenbus_unregister_driver(&blkfront_driver);
 }
 module_exit(xlblk_exit);
 
index fb1975d82a73abbf6cab4d76d1ea593da7093e57..1a17e338735e363b3ebdf7624aca334d507144c9 100644 (file)
@@ -456,7 +456,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
 {
        dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
 
-       if (ace->irq == NO_IRQ)
+       if (!ace->irq)
                /* No IRQ assigned, so need to poll */
                tasklet_schedule(&ace->fsm_tasklet);
        ace->fsm_continue_flag = 0;
@@ -1034,12 +1034,12 @@ static int __devinit ace_setup(struct ace_device *ace)
                ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
 
        /* Now we can hook up the irq handler */
-       if (ace->irq != NO_IRQ) {
+       if (ace->irq) {
                rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
                if (rc) {
                        /* Failure - fall back to polled mode */
                        dev_err(ace->dev, "request_irq failed\n");
-                       ace->irq = NO_IRQ;
+                       ace->irq = 0;
                }
        }
 
@@ -1086,7 +1086,7 @@ static void __devexit ace_teardown(struct ace_device *ace)
 
        tasklet_kill(&ace->fsm_tasklet);
 
-       if (ace->irq != NO_IRQ)
+       if (ace->irq)
                free_irq(ace->irq, ace);
 
        iounmap(ace->baseaddr);
@@ -1156,7 +1156,7 @@ static int __devinit ace_probe(struct platform_device *dev)
        resource_size_t physaddr = 0;
        int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
        u32 id = dev->id;
-       int irq = NO_IRQ;
+       int irq = 0;
        int i;
 
        dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
index b072648dc3f64904f60df2f58d230de06688024e..17e05d1076b34f2ec9e762c38239b7a02993b06e 100644 (file)
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
        switch (*bridge_agpstat & 7) {
        case 4:
                *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
-               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
                        "Fixing up support for x2 & x1\n");
                break;
        case 2:
                *bridge_agpstat |= AGPSTAT2_1X;
-               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
                        "Fixing up support for x1\n");
                break;
        default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
                        *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                        *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                } else {
-                       printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+                       printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
                        if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
                                        *bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
        bridge->driver->cache_flush();
 #ifdef CONFIG_X86
        if (set_memory_uc((unsigned long)table, 1 << page_order))
-               printk(KERN_WARNING "Could not set GATT table memory to UC!");
+               printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 
        bridge->gatt_table = (void *)table;
 #else
index e3380137b05b07107a9a62357cd7af1e8301a9ce..573532f7553e737c1c7cee6cc3443d67cb4f8139 100644 (file)
@@ -176,15 +176,6 @@ config GPIO_SCH
          The Intel Tunnel Creek processor has 5 GPIOs powered by the
          core power rail and 9 from suspend power supply.
 
-config GPIO_U300
-       bool "ST-Ericsson U300 COH 901 335/571 GPIO"
-       depends on GPIOLIB && ARCH_U300
-       help
-         Say yes here to support GPIO interface on ST-Ericsson U300.
-         The names of the two IP block variants supported are
-         COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
-         ports of 8 GPIO pins each.
-
 config GPIO_VX855
        tristate "VIA VX855/VX875 GPIO"
        depends on PCI
index 8ef9e9abe97082d26acb915bc169b053a6ff30bb..62e641e79e8f059194c0bb5315e7d46e1fd105e2 100644 (file)
@@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
 obj-$(CONFIG_GPIO_TPS65910)    += gpio-tps65910.o
 obj-$(CONFIG_GPIO_TPS65912)    += gpio-tps65912.o
 obj-$(CONFIG_GPIO_TWL4030)     += gpio-twl4030.o
-obj-$(CONFIG_MACH_U300)                += gpio-u300.o
 obj-$(CONFIG_GPIO_UCB1400)     += gpio-ucb1400.o
 obj-$(CONFIG_GPIO_VR41XX)      += gpio-vr41xx.o
 obj-$(CONFIG_GPIO_VX855)       += gpio-vx855.o
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
deleted file mode 100644 (file)
index 4035778..0000000
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * U300 GPIO module.
- *
- * Copyright (C) 2007-2011 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * This can driver either of the two basic GPIO cores
- * available in the U300 platforms:
- * COH 901 335   - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
- * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
- * Author: Linus Walleij <linus.walleij@linaro.org>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <mach/gpio-u300.h>
-
-/*
- * Bias modes for U300 GPIOs
- *
- * GPIO_U300_CONFIG_BIAS_UNKNOWN: this bias mode is not known to us
- * GPIO_U300_CONFIG_BIAS_FLOAT: no specific bias, the GPIO will float or state
- *     is not controlled by software
- * GPIO_U300_CONFIG_BIAS_PULL_UP: the GPIO will be pulled up (usually with high
- *     impedance to VDD)
- */
-#define GPIO_U300_CONFIG_BIAS_UNKNOWN  0x1000
-#define GPIO_U300_CONFIG_BIAS_FLOAT    0x1001
-#define GPIO_U300_CONFIG_BIAS_PULL_UP  0x1002
-
-/*
- * Drive modes for U300 GPIOs (output)
- *
- * GPIO_U300_CONFIG_DRIVE_PUSH_PULL: the GPIO will be driven actively high and
- *     low, this is the most typical case and is typically achieved with two
- *     active transistors on the output
- * GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN: the GPIO will be driven with open drain
- *     (open collector) which means it is usually wired with other output
- *     ports which are then pulled up with an external resistor
- * GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE: the GPIO will be driven with open drain
- *     (open emitter) which is the same as open drain mutatis mutandis but
- *     pulled to ground
- */
-#define GPIO_U300_CONFIG_DRIVE_PUSH_PULL       0x2000
-#define GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN      0x2001
-#define GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE     0x2002
-
-/*
- * Register definitions for COH 901 335 variant
- */
-#define U300_335_PORT_STRIDE                           (0x1C)
-/* Port X Pin Data Register 32bit, this is both input and output (R/W) */
-#define U300_335_PXPDIR                                        (0x00)
-#define U300_335_PXPDOR                                        (0x00)
-/* Port X Pin Config Register 32bit (R/W) */
-#define U300_335_PXPCR                                 (0x04)
-/* This register layout is the same in both blocks */
-#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK             (0x0000FFFFUL)
-#define U300_GPIO_PXPCR_PIN_MODE_MASK                  (0x00000003UL)
-#define U300_GPIO_PXPCR_PIN_MODE_SHIFT                 (0x00000002UL)
-#define U300_GPIO_PXPCR_PIN_MODE_INPUT                 (0x00000000UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL      (0x00000001UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN     (0x00000002UL)
-#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE    (0x00000003UL)
-/* Port X Interrupt Event Register 32bit (R/W) */
-#define U300_335_PXIEV                                 (0x08)
-/* Port X Interrupt Enable Register 32bit (R/W) */
-#define U300_335_PXIEN                                 (0x0C)
-/* Port X Interrupt Force Register 32bit (R/W) */
-#define U300_335_PXIFR                                 (0x10)
-/* Port X Interrupt Config Register 32bit (R/W) */
-#define U300_335_PXICR                                 (0x14)
-/* This register layout is the same in both blocks */
-#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK            (0x000000FFUL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_MASK                        (0x00000001UL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE                (0x00000000UL)
-#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE         (0x00000001UL)
-/* Port X Pull-up Enable Register 32bit (R/W) */
-#define U300_335_PXPER                                 (0x18)
-/* This register layout is the same in both blocks */
-#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK       (0x000000FFUL)
-#define U300_GPIO_PXPER_PULL_UP_DISABLE                        (0x00000001UL)
-/* Control Register 32bit (R/W) */
-#define U300_335_CR                                    (0x54)
-#define U300_335_CR_BLOCK_CLOCK_ENABLE                 (0x00000001UL)
-
-/*
- * Register definitions for COH 901 571 / 3 variant
- */
-#define U300_571_PORT_STRIDE                           (0x30)
-/*
- * Control Register 32bit (R/W)
- * bit 15-9 (mask 0x0000FE00) contains the number of cores. 8*cores
- * gives the number of GPIO pins.
- * bit 8-2  (mask 0x000001FC) contains the core version ID.
- */
-#define U300_571_CR                                    (0x00)
-#define U300_571_CR_SYNC_SEL_ENABLE                    (0x00000002UL)
-#define U300_571_CR_BLOCK_CLKRQ_ENABLE                 (0x00000001UL)
-/*
- * These registers have the same layout and function as the corresponding
- * COH 901 335 registers, just at different offset.
- */
-#define U300_571_PXPDIR                                        (0x04)
-#define U300_571_PXPDOR                                        (0x08)
-#define U300_571_PXPCR                                 (0x0C)
-#define U300_571_PXPER                                 (0x10)
-#define U300_571_PXIEV                                 (0x14)
-#define U300_571_PXIEN                                 (0x18)
-#define U300_571_PXIFR                                 (0x1C)
-#define U300_571_PXICR                                 (0x20)
-
-/* 8 bits per port, no version has more than 7 ports */
-#define U300_GPIO_PINS_PER_PORT 8
-#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * 7)
-
-struct u300_gpio {
-       struct gpio_chip chip;
-       struct list_head port_list;
-       struct clk *clk;
-       struct resource *memres;
-       void __iomem *base;
-       struct device *dev;
-       int irq_base;
-       u32 stride;
-       /* Register offsets */
-       u32 pcr;
-       u32 dor;
-       u32 dir;
-       u32 per;
-       u32 icr;
-       u32 ien;
-       u32 iev;
-};
-
-struct u300_gpio_port {
-       struct list_head node;
-       struct u300_gpio *gpio;
-       char name[8];
-       int irq;
-       int number;
-       u8 toggle_edge_mode;
-};
-
-/*
- * Macro to expand to read a specific register found in the "gpio"
- * struct. It requires the struct u300_gpio *gpio variable to exist in
- * its context. It calculates the port offset from the given pin
- * offset, muliplies by the port stride and adds the register offset
- * so it provides a pointer to the desired register.
- */
-#define U300_PIN_REG(pin, reg) \
-       (gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
-
-/*
- * Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
- * register.
- */
-#define U300_PIN_BIT(pin) \
-       (1 << (pin & 0x07))
-
-struct u300_gpio_confdata {
-       u16 bias_mode;
-       bool output;
-       int outval;
-};
-
-/* BS335 has seven ports of 8 bits each = GPIO pins 0..55 */
-#define BS335_GPIO_NUM_PORTS 7
-/* BS365 has five ports of 8 bits each = GPIO pins 0..39 */
-#define BS365_GPIO_NUM_PORTS 5
-
-#define U300_FLOATING_INPUT { \
-       .bias_mode = GPIO_U300_CONFIG_BIAS_FLOAT, \
-       .output = false, \
-}
-
-#define U300_PULL_UP_INPUT { \
-       .bias_mode = GPIO_U300_CONFIG_BIAS_PULL_UP, \
-       .output = false, \
-}
-
-#define U300_OUTPUT_LOW { \
-       .output = true, \
-       .outval = 0, \
-}
-
-#define U300_OUTPUT_HIGH { \
-       .output = true, \
-       .outval = 1, \
-}
-
-
-/* Initial configuration */
-static const struct __initdata u300_gpio_confdata
-bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
-       /* Port 0, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_HIGH,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-       },
-       /* Port 1, pins 0-7 */
-       {
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_PULL_UP_INPUT,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_HIGH,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-       },
-       /* Port 2, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_LOW,
-               U300_PULL_UP_INPUT,
-               U300_OUTPUT_LOW,
-               U300_PULL_UP_INPUT,
-       },
-       /* Port 3, pins 0-7 */
-       {
-               U300_PULL_UP_INPUT,
-               U300_OUTPUT_LOW,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-       },
-       /* Port 4, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-       },
-       /* Port 5, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-       },
-       /* Port 6, pind 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-       }
-};
-
-static const struct __initdata u300_gpio_confdata
-bs365_gpio_config[BS365_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
-       /* Port 0, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_LOW,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_PULL_UP_INPUT,
-               U300_FLOATING_INPUT,
-       },
-       /* Port 1, pins 0-7 */
-       {
-               U300_OUTPUT_LOW,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_LOW,
-               U300_FLOATING_INPUT,
-               U300_FLOATING_INPUT,
-               U300_OUTPUT_HIGH,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-       },
-       /* Port 2, pins 0-7 */
-       {
-               U300_FLOATING_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-       },
-       /* Port 3, pins 0-7 */
-       {
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-       },
-       /* Port 4, pins 0-7 */
-       {
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               U300_PULL_UP_INPUT,
-               /* These 4 pins doesn't exist on DB3210 */
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-               U300_OUTPUT_LOW,
-       }
-};
-
-/**
- * to_u300_gpio() - get the pointer to u300_gpio
- * @chip: the gpio chip member of the structure u300_gpio
- */
-static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
-{
-       return container_of(chip, struct u300_gpio, chip);
-}
-
-static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-
-       return readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset);
-}
-
-static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-       unsigned long flags;
-       u32 val;
-
-       local_irq_save(flags);
-
-       val = readl(U300_PIN_REG(offset, dor));
-       if (value)
-               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
-       else
-               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
-
-       local_irq_restore(flags);
-}
-
-static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-       unsigned long flags;
-       u32 val;
-
-       local_irq_save(flags);
-       val = readl(U300_PIN_REG(offset, pcr));
-       /* Mask out this pin, note 2 bits per setting */
-       val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
-       writel(val, U300_PIN_REG(offset, pcr));
-       local_irq_restore(flags);
-       return 0;
-}
-
-static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-                                     int value)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-       unsigned long flags;
-       u32 oldmode;
-       u32 val;
-
-       local_irq_save(flags);
-       val = readl(U300_PIN_REG(offset, pcr));
-       /*
-        * Drive mode must be set by the special mode set function, set
-        * push/pull mode by default if no mode has been selected.
-        */
-       oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
-                        ((offset & 0x07) << 1));
-       /* mode = 0 means input, else some mode is already set */
-       if (oldmode == 0) {
-               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
-                        ((offset & 0x07) << 1));
-               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
-                       << ((offset & 0x07) << 1));
-               writel(val, U300_PIN_REG(offset, pcr));
-       }
-       u300_gpio_set(chip, offset, value);
-       local_irq_restore(flags);
-       return 0;
-}
-
-static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-       int retirq = gpio->irq_base + offset;
-
-       dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d\n", offset,
-               retirq);
-       return retirq;
-}
-
-static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
-                    u16 param, unsigned long *data)
-{
-       struct u300_gpio *gpio = to_u300_gpio(chip);
-       unsigned long flags;
-       u32 val;
-
-       local_irq_save(flags);
-       switch (param) {
-       case GPIO_U300_CONFIG_BIAS_UNKNOWN:
-       case GPIO_U300_CONFIG_BIAS_FLOAT:
-               val = readl(U300_PIN_REG(offset, per));
-               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
-               break;
-       case GPIO_U300_CONFIG_BIAS_PULL_UP:
-               val = readl(U300_PIN_REG(offset, per));
-               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
-               break;
-       case GPIO_U300_CONFIG_DRIVE_PUSH_PULL:
-               val = readl(U300_PIN_REG(offset, pcr));
-               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
-                        << ((offset & 0x07) << 1));
-               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
-                       << ((offset & 0x07) << 1));
-               writel(val, U300_PIN_REG(offset, pcr));
-               break;
-       case GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN:
-               val = readl(U300_PIN_REG(offset, pcr));
-               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
-                        << ((offset & 0x07) << 1));
-               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
-                       << ((offset & 0x07) << 1));
-               writel(val, U300_PIN_REG(offset, pcr));
-               break;
-       case GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE:
-               val = readl(U300_PIN_REG(offset, pcr));
-               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
-                        << ((offset & 0x07) << 1));
-               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
-                       << ((offset & 0x07) << 1));
-               writel(val, U300_PIN_REG(offset, pcr));
-               break;
-       default:
-               local_irq_restore(flags);
-               dev_err(gpio->dev, "illegal configuration requested\n");
-               return -EINVAL;
-       }
-       local_irq_restore(flags);
-       return 0;
-}
-
-static struct gpio_chip u300_gpio_chip = {
-       .label                  = "u300-gpio-chip",
-       .owner                  = THIS_MODULE,
-       .get                    = u300_gpio_get,
-       .set                    = u300_gpio_set,
-       .direction_input        = u300_gpio_direction_input,
-       .direction_output       = u300_gpio_direction_output,
-       .to_irq                 = u300_gpio_to_irq,
-};
-
-static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
-{
-       u32 val;
-
-       val = readl(U300_PIN_REG(offset, icr));
-       /* Set mode depending on state */
-       if (u300_gpio_get(&gpio->chip, offset)) {
-               /* High now, let's trigger on falling edge next then */
-               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
-               dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
-                       offset);
-       } else {
-               /* Low now, let's trigger on rising edge next then */
-               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
-               dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
-                       offset);
-       }
-}
-
-static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
-{
-       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
-       struct u300_gpio *gpio = port->gpio;
-       int offset = d->irq - gpio->irq_base;
-       u32 val;
-
-       if ((trigger & IRQF_TRIGGER_RISING) &&
-           (trigger & IRQF_TRIGGER_FALLING)) {
-               /*
-                * The GPIO block can only trigger on falling OR rising edges,
-                * not both. So we need to toggle the mode whenever the pin
-                * goes from one state to the other with a special state flag
-                */
-               dev_dbg(gpio->dev,
-                       "trigger on both rising and falling edge on pin %d\n",
-                       offset);
-               port->toggle_edge_mode |= U300_PIN_BIT(offset);
-               u300_toggle_trigger(gpio, offset);
-       } else if (trigger & IRQF_TRIGGER_RISING) {
-               dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
-                       offset);
-               val = readl(U300_PIN_REG(offset, icr));
-               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
-               port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
-       } else if (trigger & IRQF_TRIGGER_FALLING) {
-               dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
-                       offset);
-               val = readl(U300_PIN_REG(offset, icr));
-               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
-               port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
-       }
-
-       return 0;
-}
-
-static void u300_gpio_irq_enable(struct irq_data *d)
-{
-       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
-       struct u300_gpio *gpio = port->gpio;
-       int offset = d->irq - gpio->irq_base;
-       u32 val;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       val = readl(U300_PIN_REG(offset, ien));
-       writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
-       local_irq_restore(flags);
-}
-
-static void u300_gpio_irq_disable(struct irq_data *d)
-{
-       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
-       struct u300_gpio *gpio = port->gpio;
-       int offset = d->irq - gpio->irq_base;
-       u32 val;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       val = readl(U300_PIN_REG(offset, ien));
-       writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
-       local_irq_restore(flags);
-}
-
-static struct irq_chip u300_gpio_irqchip = {
-       .name                   = "u300-gpio-irqchip",
-       .irq_enable             = u300_gpio_irq_enable,
-       .irq_disable            = u300_gpio_irq_disable,
-       .irq_set_type           = u300_gpio_irq_type,
-
-};
-
-static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
-{
-       struct u300_gpio_port *port = irq_get_handler_data(irq);
-       struct u300_gpio *gpio = port->gpio;
-       int pinoffset = port->number << 3; /* get the right stride */
-       unsigned long val;
-
-       desc->irq_data.chip->irq_ack(&desc->irq_data);
-       /* Read event register */
-       val = readl(U300_PIN_REG(pinoffset, iev));
-       /* Mask relevant bits */
-       val &= 0xFFU; /* 8 bits per port */
-       /* ACK IRQ (clear event) */
-       writel(val, U300_PIN_REG(pinoffset, iev));
-
-       /* Call IRQ handler */
-       if (val != 0) {
-               int irqoffset;
-
-               for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
-                       int pin_irq = gpio->irq_base + (port->number << 3)
-                               + irqoffset;
-                       int offset = pinoffset + irqoffset;
-
-                       dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
-                               pin_irq, offset);
-                       generic_handle_irq(pin_irq);
-                       /*
-                        * Triggering IRQ on both rising and falling edge
-                        * needs mockery
-                        */
-                       if (port->toggle_edge_mode & U300_PIN_BIT(offset))
-                               u300_toggle_trigger(gpio, offset);
-               }
-       }
-
-       desc->irq_data.chip->irq_unmask(&desc->irq_data);
-}
-
-static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
-                                     int offset,
-                                     const struct u300_gpio_confdata *conf)
-{
-       /* Set mode: input or output */
-       if (conf->output) {
-               u300_gpio_direction_output(&gpio->chip, offset, conf->outval);
-
-               /* Deactivate bias mode for output */
-               u300_gpio_config(&gpio->chip, offset,
-                                GPIO_U300_CONFIG_BIAS_FLOAT,
-                                NULL);
-
-               /* Set drive mode for output */
-               u300_gpio_config(&gpio->chip, offset,
-                                GPIO_U300_CONFIG_DRIVE_PUSH_PULL, NULL);
-
-               dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
-                       offset, conf->outval);
-       } else {
-               u300_gpio_direction_input(&gpio->chip, offset);
-
-               /* Always set output low on input pins */
-               u300_gpio_set(&gpio->chip, offset, 0);
-
-               /* Set bias mode for input */
-               u300_gpio_config(&gpio->chip, offset, conf->bias_mode, NULL);
-
-               dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
-                       offset, conf->bias_mode);
-       }
-}
-
-static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio,
-                                    struct u300_gpio_platform *plat)
-{
-       int i, j;
-
-       /* Write default config and values to all pins */
-       for (i = 0; i < plat->ports; i++) {
-               for (j = 0; j < 8; j++) {
-                       const struct u300_gpio_confdata *conf;
-                       int offset = (i*8) + j;
-
-                       if (plat->variant == U300_GPIO_COH901571_3_BS335)
-                               conf = &bs335_gpio_config[i][j];
-                       else if (plat->variant == U300_GPIO_COH901571_3_BS365)
-                               conf = &bs365_gpio_config[i][j];
-                       else
-                               break;
-
-                       u300_gpio_init_pin(gpio, offset, conf);
-               }
-       }
-}
-
-static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
-{
-       struct u300_gpio_port *port;
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &gpio->port_list) {
-               port = list_entry(p, struct u300_gpio_port, node);
-               list_del(&port->node);
-               free_irq(port->irq, port);
-               kfree(port);
-       }
-}
-
-static int __init u300_gpio_probe(struct platform_device *pdev)
-{
-       struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
-       struct u300_gpio *gpio;
-       int err = 0;
-       int portno;
-       u32 val;
-       u32 ifr;
-       int i;
-
-       gpio = kzalloc(sizeof(struct u300_gpio), GFP_KERNEL);
-       if (gpio == NULL) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
-               return -ENOMEM;
-       }
-
-       gpio->chip = u300_gpio_chip;
-       gpio->chip.ngpio = plat->ports * U300_GPIO_PINS_PER_PORT;
-       gpio->irq_base = plat->gpio_irq_base;
-       gpio->chip.dev = &pdev->dev;
-       gpio->chip.base = plat->gpio_base;
-       gpio->dev = &pdev->dev;
-
-       /* Get GPIO clock */
-       gpio->clk = clk_get(gpio->dev, NULL);
-       if (IS_ERR(gpio->clk)) {
-               err = PTR_ERR(gpio->clk);
-               dev_err(gpio->dev, "could not get GPIO clock\n");
-               goto err_no_clk;
-       }
-       err = clk_enable(gpio->clk);
-       if (err) {
-               dev_err(gpio->dev, "could not enable GPIO clock\n");
-               goto err_no_clk_enable;
-       }
-
-       gpio->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!gpio->memres) {
-               dev_err(gpio->dev, "could not get GPIO memory resource\n");
-               err = -ENODEV;
-               goto err_no_resource;
-       }
-
-       if (!request_mem_region(gpio->memres->start,
-                               resource_size(gpio->memres),
-                               "GPIO Controller")) {
-               err = -ENODEV;
-               goto err_no_ioregion;
-       }
-
-       gpio->base = ioremap(gpio->memres->start, resource_size(gpio->memres));
-       if (!gpio->base) {
-               err = -ENOMEM;
-               goto err_no_ioremap;
-       }
-
-       if (plat->variant == U300_GPIO_COH901335) {
-               dev_info(gpio->dev,
-                        "initializing GPIO Controller COH 901 335\n");
-               gpio->stride = U300_335_PORT_STRIDE;
-               gpio->pcr = U300_335_PXPCR;
-               gpio->dor = U300_335_PXPDOR;
-               gpio->dir = U300_335_PXPDIR;
-               gpio->per = U300_335_PXPER;
-               gpio->icr = U300_335_PXICR;
-               gpio->ien = U300_335_PXIEN;
-               gpio->iev = U300_335_PXIEV;
-               ifr = U300_335_PXIFR;
-
-               /* Turn on the GPIO block */
-               writel(U300_335_CR_BLOCK_CLOCK_ENABLE,
-                      gpio->base + U300_335_CR);
-       } else if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
-                  plat->variant == U300_GPIO_COH901571_3_BS365) {
-               dev_info(gpio->dev,
-                        "initializing GPIO Controller COH 901 571/3\n");
-               gpio->stride = U300_571_PORT_STRIDE;
-               gpio->pcr = U300_571_PXPCR;
-               gpio->dor = U300_571_PXPDOR;
-               gpio->dir = U300_571_PXPDIR;
-               gpio->per = U300_571_PXPER;
-               gpio->icr = U300_571_PXICR;
-               gpio->ien = U300_571_PXIEN;
-               gpio->iev = U300_571_PXIEV;
-               ifr = U300_571_PXIFR;
-
-               val = readl(gpio->base + U300_571_CR);
-               dev_info(gpio->dev, "COH901571/3 block version: %d, " \
-                        "number of cores: %d totalling %d pins\n",
-                        ((val & 0x000001FC) >> 2),
-                        ((val & 0x0000FE00) >> 9),
-                        ((val & 0x0000FE00) >> 9) * 8);
-               writel(U300_571_CR_BLOCK_CLKRQ_ENABLE,
-                      gpio->base + U300_571_CR);
-               u300_gpio_init_coh901571(gpio, plat);
-       } else {
-               dev_err(gpio->dev, "unknown block variant\n");
-               err = -ENODEV;
-               goto err_unknown_variant;
-       }
-
-       /* Add each port with its IRQ separately */
-       INIT_LIST_HEAD(&gpio->port_list);
-       for (portno = 0 ; portno < plat->ports; portno++) {
-               struct u300_gpio_port *port =
-                       kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);
-
-               if (!port) {
-                       dev_err(gpio->dev, "out of memory\n");
-                       err = -ENOMEM;
-                       goto err_no_port;
-               }
-
-               snprintf(port->name, 8, "gpio%d", portno);
-               port->number = portno;
-               port->gpio = gpio;
-
-               port->irq = platform_get_irq_byname(pdev,
-                                                   port->name);
-
-               dev_dbg(gpio->dev, "register IRQ %d for %s\n", port->irq,
-                       port->name);
-
-               irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
-               irq_set_handler_data(port->irq, port);
-
-               /* For each GPIO pin set the unique IRQ handler */
-               for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
-                       int irqno = gpio->irq_base + (portno << 3) + i;
-
-                       dev_dbg(gpio->dev, "handler for IRQ %d on %s\n",
-                               irqno, port->name);
-                       irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
-                                                handle_simple_irq);
-                       set_irq_flags(irqno, IRQF_VALID);
-                       irq_set_chip_data(irqno, port);
-               }
-
-               /* Turns off irq force (test register) for this port */
-               writel(0x0, gpio->base + portno * gpio->stride + ifr);
-
-               list_add_tail(&port->node, &gpio->port_list);
-       }
-       dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
-
-       err = gpiochip_add(&gpio->chip);
-       if (err) {
-               dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
-               goto err_no_chip;
-       }
-
-       platform_set_drvdata(pdev, gpio);
-
-       return 0;
-
-err_no_chip:
-err_no_port:
-       u300_gpio_free_ports(gpio);
-err_unknown_variant:
-       iounmap(gpio->base);
-err_no_ioremap:
-       release_mem_region(gpio->memres->start, resource_size(gpio->memres));
-err_no_ioregion:
-err_no_resource:
-       clk_disable(gpio->clk);
-err_no_clk_enable:
-       clk_put(gpio->clk);
-err_no_clk:
-       kfree(gpio);
-       dev_info(&pdev->dev, "module ERROR:%d\n", err);
-       return err;
-}
-
-static int __exit u300_gpio_remove(struct platform_device *pdev)
-{
-       struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
-       struct u300_gpio *gpio = platform_get_drvdata(pdev);
-       int err;
-
-       /* Turn off the GPIO block */
-       if (plat->variant == U300_GPIO_COH901335)
-               writel(0x00000000U, gpio->base + U300_335_CR);
-       if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
-           plat->variant == U300_GPIO_COH901571_3_BS365)
-               writel(0x00000000U, gpio->base + U300_571_CR);
-
-       err = gpiochip_remove(&gpio->chip);
-       if (err < 0) {
-               dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
-               return err;
-       }
-       u300_gpio_free_ports(gpio);
-       iounmap(gpio->base);
-       release_mem_region(gpio->memres->start,
-                          resource_size(gpio->memres));
-       clk_disable(gpio->clk);
-       clk_put(gpio->clk);
-       platform_set_drvdata(pdev, NULL);
-       kfree(gpio);
-       return 0;
-}
-
-static struct platform_driver u300_gpio_driver = {
-       .driver         = {
-               .name   = "u300-gpio",
-       },
-       .remove         = __exit_p(u300_gpio_remove),
-};
-
-
-static int __init u300_gpio_init(void)
-{
-       return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
-}
-
-static void __exit u300_gpio_exit(void)
-{
-       platform_driver_unregister(&u300_gpio_driver);
-}
-
-arch_initcall(u300_gpio_init);
-module_exit(u300_gpio_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
-MODULE_LICENSE("GPL");
index 1368826ef28475c0f191510b7016dd1068b55187..2418429a98360fd996888e5ccce6189bad9d9a86 100644 (file)
@@ -162,3 +162,6 @@ config DRM_SAVAGE
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
index c0496f660707e97576893a9f71c9e9e1523fba06..0cde1b80fdb198a074d1b30e7cf8d330852c6fa8 100644 (file)
@@ -9,7 +9,7 @@ drm-y       :=  drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-               drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+               drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)  +=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-y                  += i2c/
index 6d440fb894cf2acba53c863d20d10b16ccfa14b5..325365f6d355c3c12bf2386bb354bd095d873061 100644 (file)
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       mutex_unlock(&dev->struct_mutex);
-
        request->handle = NULL;
        list_for_each_entry(_entry, &dev->maplist, head) {
                if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
                        break;
                }
        }
+
+       mutex_unlock(&dev->struct_mutex);
+
        if (request->handle == NULL)
                return -EINVAL;
 
index 8323fc3898401ac957d9a677b947455f5a598b91..5e818a808acee995cc2f0e73e0abb84f6bb60894 100644 (file)
@@ -36,6 +36,7 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_edid.h"
+#include "drm_fourcc.h"
 
 struct drm_prop_enum_list {
        int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
        struct drm_device *dev = fb->dev;
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
        struct drm_mode_set set;
        int ret;
 
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
                }
        }
 
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               if (plane->fb == fb) {
+                       /* should turn off the crtc */
+                       ret = plane->funcs->disable_plane(plane);
+                       if (ret)
+                               DRM_ERROR("failed to disable plane with busy fb\n");
+                       /* disconnect the plane from the fb and crtc: */
+                       plane->fb = NULL;
+                       plane->crtc = NULL;
+               }
+       }
+
        drm_mode_object_put(dev, &fb->base);
        list_del(&fb->head);
        dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs,
+                  const struct drm_plane_funcs *funcs,
+                  const uint32_t *formats, uint32_t format_count,
+                  bool priv)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       plane->dev = dev;
+       drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+       plane->funcs = funcs;
+       plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+                                     GFP_KERNEL);
+       if (!plane->format_types) {
+               DRM_DEBUG_KMS("out of memory when allocating plane\n");
+               drm_mode_object_put(dev, &plane->base);
+               mutex_unlock(&dev->mode_config.mutex);
+               return -ENOMEM;
+       }
+
+       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+       plane->format_count = format_count;
+       plane->possible_crtcs = possible_crtcs;
+
+       /* private planes are not exposed to userspace, but depending on
+        * display hardware, might be convenient to allow sharing programming
+        * for the scanout engine with the crtc implementation.
+        */
+       if (!priv) {
+               list_add_tail(&plane->head, &dev->mode_config.plane_list);
+               dev->mode_config.num_plane++;
+       } else {
+               INIT_LIST_HEAD(&plane->head);
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+
+       mutex_lock(&dev->mode_config.mutex);
+       kfree(plane->format_types);
+       drm_mode_object_put(dev, &plane->base);
+       /* if not added to a list, it must be a private plane */
+       if (!list_empty(&plane->head)) {
+               list_del(&plane->head);
+               dev->mode_config.num_plane--;
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
 /**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
        INIT_LIST_HEAD(&dev->mode_config.encoder_list);
        INIT_LIST_HEAD(&dev->mode_config.property_list);
        INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       INIT_LIST_HEAD(&dev->mode_config.plane_list);
        idr_init(&dev->mode_config.crtc_idr);
 
        mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
        struct drm_encoder *encoder, *enct;
        struct drm_framebuffer *fb, *fbt;
        struct drm_property *property, *pt;
+       struct drm_plane *plane, *plt;
 
        list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
                                 head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                crtc->funcs->destroy(crtc);
        }
 
+       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+                                head) {
+               plane->funcs->destroy(plane);
+       }
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
         */
        if ((out_resp->count_modes >= mode_count) && mode_count) {
                copied = 0;
-               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
                list_for_each_entry(mode, &connector->modes, head) {
                        drm_crtc_convert_to_umode(&u_mode, mode);
                        if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        if ((out_resp->count_props >= props_count) && props_count) {
                copied = 0;
-               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
-               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
                for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
                        if (connector->property_ids[i] != 0) {
                                if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
                copied = 0;
-               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
                for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
                        if (connector->encoder_ids[i] != 0) {
                                if (put_user(connector->encoder_ids[i],
@@ -1470,6 +1547,245 @@ out:
        return ret;
 }
 
+/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return an plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane_res *plane_resp = data;
+       struct drm_mode_config *config;
+       struct drm_plane *plane;
+       uint32_t __user *plane_ptr;
+       int copied = 0, ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       config = &dev->mode_config;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (config->num_plane &&
+           (plane_resp->count_planes >= config->num_plane)) {
+               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+               list_for_each_entry(plane, &config->plane_list, head) {
+                       if (put_user(plane->base.id, plane_ptr + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       plane_resp->count_planes = config->num_plane;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane *plane_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       uint32_t __user *format_ptr;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, plane_resp->plane_id,
+                                  DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -ENOENT;
+               goto out;
+       }
+       plane = obj_to_plane(obj);
+
+       if (plane->crtc)
+               plane_resp->crtc_id = plane->crtc->base.id;
+       else
+               plane_resp->crtc_id = 0;
+
+       if (plane->fb)
+               plane_resp->fb_id = plane->fb->base.id;
+       else
+               plane_resp->fb_id = 0;
+
+       plane_resp->plane_id = plane->base.id;
+       plane_resp->possible_crtcs = plane->possible_crtcs;
+       plane_resp->gamma_size = plane->gamma_size;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (plane->format_count &&
+           (plane_resp->count_format_types >= plane->format_count)) {
+               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+               if (copy_to_user(format_ptr,
+                                plane->format_types,
+                                sizeof(uint32_t) * plane->format_count)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+       plane_resp->count_format_types = plane->format_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down an plane
+ * @dev: DRM device
+ * @data: ioctl data*
+ * @file_prive: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_set_plane *plane_req = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+       unsigned int fb_width, fb_height;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * First, find the plane, crtc, and fb objects.  If not available,
+        * we don't bother to call the driver.
+        */
+       obj = drm_mode_object_find(dev, plane_req->plane_id,
+                                  DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             plane_req->plane_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       plane = obj_to_plane(obj);
+
+       /* No fb means shut it down */
+       if (!plane_req->fb_id) {
+               plane->funcs->disable_plane(plane);
+               plane->crtc = NULL;
+               plane->fb = NULL;
+               goto out;
+       }
+
+       obj = drm_mode_object_find(dev, plane_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                             plane_req->crtc_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       obj = drm_mode_object_find(dev, plane_req->fb_id,
+                                  DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+                             plane_req->fb_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       /* Check whether this plane supports the fb pixel format. */
+       for (i = 0; i < plane->format_count; i++)
+               if (fb->pixel_format == plane->format_types[i])
+                       break;
+       if (i == plane->format_count) {
+               DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       fb_width = fb->width << 16;
+       fb_height = fb->height << 16;
+
+       /* Make sure source coordinates are inside the fb. */
+       if (plane_req->src_w > fb_width ||
+           plane_req->src_x > fb_width - plane_req->src_w ||
+           plane_req->src_h > fb_height ||
+           plane_req->src_y > fb_height - plane_req->src_h) {
+               DRM_DEBUG_KMS("Invalid source coordinates "
+                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+                             plane_req->src_w >> 16,
+                             ((plane_req->src_w & 0xffff) * 15625) >> 10,
+                             plane_req->src_h >> 16,
+                             ((plane_req->src_h & 0xffff) * 15625) >> 10,
+                             plane_req->src_x >> 16,
+                             ((plane_req->src_x & 0xffff) * 15625) >> 10,
+                             plane_req->src_y >> 16,
+                             ((plane_req->src_y & 0xffff) * 15625) >> 10);
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       /* Give drivers some help against integer overflows */
+       if (plane_req->crtc_w > INT_MAX ||
+           plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+           plane_req->crtc_h > INT_MAX ||
+           plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             plane_req->crtc_w, plane_req->crtc_h,
+                             plane_req->crtc_x, plane_req->crtc_y);
+               ret = -ERANGE;
+               goto out;
+       }
+
+       ret = plane->funcs->update_plane(plane, crtc, fb,
+                                        plane_req->crtc_x, plane_req->crtc_y,
+                                        plane_req->crtc_w, plane_req->crtc_h,
+                                        plane_req->src_x, plane_req->src_y,
+                                        plane_req->src_w, plane_req->src_h);
+       if (!ret) {
+               plane->crtc = crtc;
+               plane->fb = fb;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @inode: inode from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                }
 
                for (i = 0; i < crtc_req->count_connectors; i++) {
-                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
                        if (get_user(out_id, &set_connectors_ptr[i])) {
                                ret = -EFAULT;
                                goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       if (!req->flags) {
-               DRM_ERROR("no operation set\n");
+       if (!req->flags)
                return -EINVAL;
-       }
 
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (!crtc->funcs->cursor_set) {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -ENXIO;
                        goto out;
                }
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
                if (crtc->funcs->cursor_move) {
                        ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
                } else {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -EFAULT;
                        goto out;
                }
@@ -1664,6 +1976,42 @@ out:
        return ret;
 }
 
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+       uint32_t fmt;
+
+       switch (bpp) {
+       case 8:
+               fmt = DRM_FORMAT_RGB332;
+               break;
+       case 16:
+               if (depth == 15)
+                       fmt = DRM_FORMAT_XRGB1555;
+               else
+                       fmt = DRM_FORMAT_RGB565;
+               break;
+       case 24:
+               fmt = DRM_FORMAT_RGB888;
+               break;
+       case 32:
+               if (depth == 24)
+                       fmt = DRM_FORMAT_XRGB8888;
+               else if (depth == 30)
+                       fmt = DRM_FORMAT_XRGB2101010;
+               else
+                       fmt = DRM_FORMAT_ARGB8888;
+               break;
+       default:
+               DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+               fmt = DRM_FORMAT_XRGB8888;
+               break;
+       }
+
+       return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
  * @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
 int drm_mode_addfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv)
 {
-       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_fb_cmd *or = data;
+       struct drm_mode_fb_cmd2 r = {};
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       /* Use new struct with format internally */
+       r.fb_id = or->fb_id;
+       r.width = or->width;
+       r.height = or->height;
+       r.pitches[0] = or->pitch;
+       r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+       r.handles[0] = or->handle;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if ((config->min_width > r.width) || (r.width > config->max_width))
+               return -EINVAL;
+
+       if ((config->min_height > r.height) || (r.height > config->max_height))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficiently large */
+       /* TODO setup destructor callback */
+
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+       if (IS_ERR(fb)) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = PTR_ERR(fb);
+               goto out;
+       }
+
+       or->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+       uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_XBGR4444:
+       case DRM_FORMAT_RGBX4444:
+       case DRM_FORMAT_BGRX4444:
+       case DRM_FORMAT_ARGB4444:
+       case DRM_FORMAT_ABGR4444:
+       case DRM_FORMAT_RGBA4444:
+       case DRM_FORMAT_BGRA4444:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_AYUV:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+                   void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd2 *r = data;
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_framebuffer *fb;
        int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
                return -EINVAL;
 
        if ((config->min_width > r->width) || (r->width > config->max_width)) {
-               DRM_ERROR("mode new framebuffer width not within limits\n");
+               DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+                         r->width, config->min_width, config->max_width);
                return -EINVAL;
        }
        if ((config->min_height > r->height) || (r->height > config->max_height)) {
-               DRM_ERROR("mode new framebuffer height not within limits\n");
+               DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+                         r->height, config->min_height, config->max_height);
                return -EINVAL;
        }
 
-       mutex_lock(&dev->mode_config.mutex);
+       ret = format_check(r);
+       if (ret) {
+               DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+               return ret;
+       }
 
-       /* TODO check buffer is sufficiently large */
-       /* TODO setup destructor callback */
+       mutex_lock(&dev->mode_config.mutex);
 
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
        if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
        obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
        /* TODO check that we really get a framebuffer back. */
        if (!obj) {
-               DRM_ERROR("mode invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
                        found = 1;
 
        if (!found) {
-               DRM_ERROR("tried to remove a fb that we didn't own\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
        r->width = fb->width;
        r->depth = fb->depth;
        r->bpp = fb->bits_per_pixel;
-       r->pitch = fb->pitch;
+       r->pitch = fb->pitches[0];
        fb->funcs->create_handle(fb, file_priv, &r->handle);
 
 out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out_err1;
        }
        fb = obj_to_fb(obj);
 
        num_clips = r->num_clips;
-       clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+       clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
        if (!num_clips != !clips_ptr) {
                ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        struct drm_property_enum *prop_enum;
        struct drm_mode_property_enum __user *enum_ptr;
        struct drm_property_blob *prop_blob;
-       uint32_t *blob_id_ptr;
+       uint32_t __user *blob_id_ptr;
        uint64_t __user *values_ptr;
        uint32_t __user *blob_length_ptr;
 
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        out_resp->flags = property->flags;
 
        if ((out_resp->count_values >= value_count) && value_count) {
-               values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+               values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
                for (i = 0; i < value_count; i++) {
                        if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
                                ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        if (property->flags & DRM_MODE_PROP_ENUM) {
                if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
                        copied = 0;
-                       enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+                       enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
                        list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
                                if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        if (property->flags & DRM_MODE_PROP_BLOB) {
                if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
                        copied = 0;
-                       blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
-                       blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+                       blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+                       blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
                        list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
                                if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
        struct drm_mode_get_blob *out_resp = data;
        struct drm_property_blob *blob;
        int ret = 0;
-       void *blob_ptr;
+       void __user *blob_ptr;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
        blob = obj_to_blob(obj);
 
        if (out_resp->length == blob->length) {
-               blob_ptr = (void *)(unsigned long)out_resp->data;
+               blob_ptr = (void __user *)(unsigned long)out_resp->data;
                if (copy_to_user(blob_ptr, blob->data, blob->length)){
                        ret = -EFAULT;
                        goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 
        return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+                         int *bpp)
+{
+       switch (format) {
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+               *depth = 8;
+               *bpp = 8;
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+               *depth = 15;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               *depth = 16;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               *depth = 24;
+               *bpp = 24;
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+               *depth = 24;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+               *depth = 30;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               *depth = 32;
+               *bpp = 32;
+               break;
+       default:
+               DRM_DEBUG_KMS("unsupported pixel format\n");
+               *depth = 0;
+               *bpp = 0;
+               break;
+       }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
index d2619d72ceceb16d7b88a33b09708219483019b4..84a4a809793ff7bcf7fdf68e9d28555eb5e5b8ba 100644 (file)
@@ -34,6 +34,7 @@
 
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_fourcc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
@@ -710,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                        for (i = 0; i < set->num_connectors; i++) {
                                DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
                                              drm_get_connector_name(set->connectors[i]));
-                               set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+                               set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
                        }
                }
                drm_helper_disable_unused_functions(dev);
@@ -847,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                  struct drm_mode_fb_cmd *mode_cmd)
+                                  struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       int i;
+
        fb->width = mode_cmd->width;
        fb->height = mode_cmd->height;
-       fb->pitch = mode_cmd->pitch;
-       fb->bits_per_pixel = mode_cmd->bpp;
-       fb->depth = mode_cmd->depth;
+       for (i = 0; i < 4; i++) {
+               fb->pitches[i] = mode_cmd->pitches[i];
+               fb->offsets[i] = mode_cmd->offsets[i];
+       }
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+                                   &fb->bits_per_pixel);
+       fb->pixel_format = mode_cmd->pixel_format;
 
        return 0;
 }
@@ -1008,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 3;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
index 40c187c60f44fcb2ea45bedede0fe003f9e8ebb2..ebf7d3f68fc4c33446897be3bdee3dedde0038e6 100644 (file)
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
 
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
index 3e927ce7557d6a41f7bc1a6e27b088be127006d0..ece03fc2d3864aa25ac7bebccfa916cfacc6fd07 100644 (file)
@@ -508,25 +508,10 @@ static void
 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
 {
        int i, n = 0;
-       u8 rev = ext[0x01], d = ext[0x02];
+       u8 d = ext[0x02];
        u8 *det_base = ext + d;
 
-       switch (rev) {
-       case 0:
-               /* can't happen */
-               return;
-       case 1:
-               /* have to infer how many blocks we have, check pixel clock */
-               for (i = 0; i < 6; i++)
-                       if (det_base[18*i] || det_base[18*i+1])
-                               n++;
-               break;
-       default:
-               /* explicit count */
-               n = min(ext[0x03] & 0x0f, 6);
-               break;
-       }
-
+       n = (127 - d) / 18;
        for (i = 0; i < n; i++)
                cb((struct detailed_timing *)(det_base + 18 * i), closure);
 }
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK    0x01
+#define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK  0x04
 #define EDID_BASIC_AUDIO       (1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+static int
+do_cea_modes(struct drm_connector *connector, u8 *db, u8 len)
+{
+       struct drm_device *dev = connector->dev;
+       u8 *mode, cea_mode;
+       int modes = 0;
+
+       for (mode = db; mode < db + len; mode++) {
+               cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+               if (cea_mode < drm_num_cea_modes) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev,
+                                                    &edid_cea_modes[cea_mode]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+       }
+
+       return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+       u8 *cea = drm_find_cea_extension(edid);
+       u8 *db, dbl;
+       int modes = 0;
+
+       if (cea && cea[1] >= 3) {
+               for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+                       dbl = db[0] & 0x1f;
+                       if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+                               modes += do_cea_modes(connector, db + 1, dbl);
+               }
+       }
+
+       return modes;
+}
+
 static void
 parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
 {
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
        eld[18] = edid->prod_code[0];
        eld[19] = edid->prod_code[1];
 
-       for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-               dbl = db[0] & 0x1f;
-
-               switch ((db[0] & 0xe0) >> 5) {
-               case AUDIO_BLOCK:       /* Audio Data Block, contains SADs */
-                       sad_count = dbl / 3;
-                       memcpy(eld + 20 + mnl, &db[1], dbl);
-                       break;
-               case SPEAKER_BLOCK:     /* Speaker Allocation Data Block */
-                       eld[7] = db[1];
-                       break;
-               case VENDOR_BLOCK:
-                       /* HDMI Vendor-Specific Data Block */
-                       if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
-                               parse_hdmi_vsdb(connector, db);
-                       break;
-               default:
-                       break;
+       if (cea[1] >= 3)
+               for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+                       dbl = db[0] & 0x1f;
+
+                       switch ((db[0] & 0xe0) >> 5) {
+                       case AUDIO_BLOCK:
+                               /* Audio Data Block, contains SADs */
+                               sad_count = dbl / 3;
+                               memcpy(eld + 20 + mnl, &db[1], dbl);
+                               break;
+                       case SPEAKER_BLOCK:
+                               /* Speaker Allocation Data Block */
+                               eld[7] = db[1];
+                               break;
+                       case VENDOR_BLOCK:
+                               /* HDMI Vendor-Specific Data Block */
+                               if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+                                       parse_hdmi_vsdb(connector, db);
+                               break;
+                       default:
+                               break;
+                       }
                }
-       }
        eld[5] |= sad_count << 4;
        eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
        num_modes += add_standard_modes(connector, edid);
        num_modes += add_established_modes(connector, edid);
        num_modes += add_inferred_modes(connector, edid);
+       num_modes += add_cea_modes(connector, edid);
 
        if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
                edid_fixup_preferred(connector, quirks);
index 5f2064489fd50522deedd9a3feb81fda55d349d7..a91ffb1172208beea3b85b5d04ff0def347f89ef 100644 (file)
@@ -378,3 +378,287 @@ static const struct {
        { 1920, 1440, 75, 0 },
 };
 static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+       /* 640x480@60Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@60Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@60Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x720@60Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+                  1430, 1650, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080i@60Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x240@60Hz */
+       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x240@60Hz */
+       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x480i@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x480i@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x240@60Hz */
+       { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x240@60Hz */
+       { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+                  1596, 1716, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+                  1596, 1716, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080@60Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x576@50Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@50Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x720@50Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+                  1760, 1980, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080i@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x288@50Hz */
+       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x288@50Hz */
+       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576i@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x576i@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x288@50Hz */
+       { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x288@50Hz */
+       { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1592, 1728, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1592, 1728, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@24Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+                  2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@25Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@30Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2880x480@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+                  3192, 3432, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x480@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+                  3192, 3432, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+                  3184, 3456, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+                  3184, 3456, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080i@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+                  2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1920x1080i@100Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@100Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+                  1760, 1980, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x576@100Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@100Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@100Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@100Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080i@120Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@120Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+                  1430, 1650, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x480@120Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@120Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480i@120Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@120Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 720x576@200Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@200Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@200Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@200Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 720x480@240Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@240Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480i@240 */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@240 */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@24Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+                  3080, 3300, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x720@25Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+                  3740, 3960, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x720@30Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+                  3080, 3300, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@120Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@100Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
	sizeof(edid_cea_modes) / sizeof(edid_cea_modes[0]);
index 80fe39d98b0c2e2ca7a22b762d3d9dcf87781d1c..aada26f63decc7a91d2803079dc9795d053ecdc2 100644 (file)
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
 int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
                        void *panic_str)
 {
+       /*
+        * It's a waste of time and effort to switch back to text console
+        * if the kernel should reboot before panic messages can be seen.
+        */
+       if (panic_timeout < 0)
+               return 0;
+
        printk(KERN_ERR "panic occurred, switching back to text console\n");
        return drm_fb_helper_force_kernel_mode();
 }
index 4911e1d1dcf2a60d527235c35da7a33edc864be4..c00cf154cc0bbd81ce3c807fde6bffb2a5ff777f 100644 (file)
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
                goto out;
 
        old_fops = filp->f_op;
-       filp->f_op = fops_get(&dev->driver->fops);
+       filp->f_op = fops_get(dev->driver->fops);
        if (filp->f_op == NULL) {
                filp->f_op = old_fops;
                goto out;
index 904d7e9c8e4752ffa738ee33b0f884e305825a12..956fd38d7c9ed6da526479cf8b1d91ad2038c22e 100644 (file)
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
        int i;
 
        idx = map->offset;
-
-       mutex_lock(&dev->struct_mutex);
-       if (idx < 0) {
-               mutex_unlock(&dev->struct_mutex);
+       if (idx < 0)
                return -EINVAL;
-       }
 
        i = 0;
+       mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist) {
                if (i == idx) {
                        r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
        int i;
 
        idx = client->idx;
-       mutex_lock(&dev->struct_mutex);
-
        i = 0;
+
+       mutex_lock(&dev->struct_mutex);
        list_for_each_entry(pt, &dev->filelist, lhead) {
                if (i++ >= idx) {
                        client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
        memset(stats, 0, sizeof(*stats));
 
-       mutex_lock(&dev->struct_mutex);
-
        for (i = 0; i < dev->counters; i++) {
                if (dev->types[i] == _DRM_STAT_LOCK)
                        stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
        stats->count = dev->counters;
 
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
index 632ae243ede0b9f1df4b42f2e96b30479be4e05a..c79c713eeba0ec62aa759dcb6de69404692f5758 100644 (file)
@@ -33,6 +33,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/export.h>
 #include "drmP.h"
 
 static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_take);
 
 void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_release);
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644 (file)
index cebce45..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
-       struct drm_hash_item owner_hash;
-       struct list_head sman_list;
-       struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
-       drm_ht_remove(&sman->user_hash_tab);
-       drm_ht_remove(&sman->owner_hash_tab);
-       kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
-             unsigned int user_order, unsigned int owner_order)
-{
-       int ret = 0;
-
-       sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
-       if (!sman->mm) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       sman->num_managers = num_managers;
-       INIT_LIST_HEAD(&sman->owner_items);
-       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
-       if (ret)
-               goto out1;
-       ret = drm_ht_create(&sman->user_hash_tab, user_order);
-       if (!ret)
-               goto out;
-
-       drm_ht_remove(&sman->owner_hash_tab);
-out1:
-       kfree(sman->mm);
-out:
-       return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
-                                 unsigned alignment)
-{
-       struct drm_mm *mm = (struct drm_mm *) private;
-       struct drm_mm_node *tmp;
-
-       tmp = drm_mm_search_free(mm, size, alignment, 1);
-       if (!tmp) {
-               return NULL;
-       }
-       tmp = drm_mm_get_block(tmp, size, alignment);
-       return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
-       drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
-       struct drm_mm *mm = (struct drm_mm *) private;
-       drm_mm_takedown(mm);
-       kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
-       return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
-                  unsigned long start, unsigned long size)
-{
-       struct drm_sman_mm *sman_mm;
-       struct drm_mm *mm;
-       int ret;
-
-       BUG_ON(manager >= sman->num_managers);
-
-       sman_mm = &sman->mm[manager];
-       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-       if (!mm) {
-               return -ENOMEM;
-       }
-       sman_mm->private = mm;
-       ret = drm_mm_init(mm, start, size);
-
-       if (ret) {
-               kfree(mm);
-               return ret;
-       }
-
-       sman_mm->allocate = drm_sman_mm_allocate;
-       sman_mm->free = drm_sman_mm_free;
-       sman_mm->destroy = drm_sman_mm_destroy;
-       sman_mm->offset = drm_sman_mm_offset;
-
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
-                    struct drm_sman_mm * allocator)
-{
-       BUG_ON(manager >= sman->num_managers);
-       sman->mm[manager] = *allocator;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
-                                                unsigned long owner)
-{
-       int ret;
-       struct drm_hash_item *owner_hash_item;
-       struct drm_owner_item *owner_item;
-
-       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
-       if (!ret) {
-               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
-                                     owner_hash);
-       }
-
-       owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
-       if (!owner_item)
-               goto out;
-
-       INIT_LIST_HEAD(&owner_item->mem_blocks);
-       owner_item->owner_hash.key = owner;
-       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
-               goto out1;
-
-       list_add_tail(&owner_item->sman_list, &sman->owner_items);
-       return owner_item;
-
-out1:
-       kfree(owner_item);
-out:
-       return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
-                                   unsigned long size, unsigned alignment,
-                                   unsigned long owner)
-{
-       void *tmp;
-       struct drm_sman_mm *sman_mm;
-       struct drm_owner_item *owner_item;
-       struct drm_memblock_item *memblock;
-
-       BUG_ON(manager >= sman->num_managers);
-
-       sman_mm = &sman->mm[manager];
-       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
-       if (!tmp) {
-               return NULL;
-       }
-
-       memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
-       if (!memblock)
-               goto out;
-
-       memblock->mm_info = tmp;
-       memblock->mm = sman_mm;
-       memblock->sman = sman;
-
-       if (drm_ht_just_insert_please
-           (&sman->user_hash_tab, &memblock->user_hash,
-            (unsigned long)memblock, 32, 0, 0))
-               goto out1;
-
-       owner_item = drm_sman_get_owner_item(sman, owner);
-       if (!owner_item)
-               goto out2;
-
-       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
-       return memblock;
-
-out2:
-       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
-       kfree(memblock);
-out:
-       sman_mm->free(sman_mm->private, tmp);
-
-       return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
-       struct drm_sman *sman = item->sman;
-
-       list_del(&item->owner_list);
-       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
-       item->mm->free(item->mm->private, item->mm_info);
-       kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
-       struct drm_hash_item *hash_item;
-       struct drm_memblock_item *memblock_item;
-
-       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
-               return -EINVAL;
-
-       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
-                                      user_hash);
-       drm_sman_free(memblock_item);
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
-                                 struct drm_owner_item *owner_item)
-{
-       list_del(&owner_item->sman_list);
-       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
-       kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
-       struct drm_hash_item *hash_item;
-       struct drm_owner_item *owner_item;
-
-       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-               return -1;
-       }
-
-       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-       if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
-               drm_sman_remove_owner(sman, owner_item);
-               return -1;
-       }
-
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
-                                     struct drm_owner_item *owner_item)
-{
-       struct drm_memblock_item *entry, *next;
-
-       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
-                                owner_list) {
-               drm_sman_free(entry);
-       }
-       drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
-       struct drm_hash_item *hash_item;
-       struct drm_owner_item *owner_item;
-
-       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
-               return;
-       }
-
-       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-       drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
-       struct drm_owner_item *entry, *next;
-       unsigned int i;
-       struct drm_sman_mm *sman_mm;
-
-       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
-               drm_sman_do_owner_cleanup(sman, entry);
-       }
-       if (sman->mm) {
-               for (i = 0; i < sman->num_managers; ++i) {
-                       sman_mm = &sman->mm[i];
-                       if (sman_mm->private) {
-                               sman_mm->destroy(sman_mm->private);
-                               sman_mm->private = NULL;
-                       }
-               }
-       }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
index 847466aab4352068fc98e5c659dcf9b66f56e71f..f9aaa56eae07c76575c090ac0ded6a086b215ca1 100644 (file)
@@ -18,3 +18,10 @@ config DRM_EXYNOS_FIMD
        help
          Choose this option if you want to use Exynos FIMD for DRM.
          If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+       tristate "Exynos DRM HDMI"
+       depends on DRM_EXYNOS
+       help
+         Choose this option if you want to use Exynos HDMI for DRM.
+         If M is selected, the module will be called exynos_drm_hdmi
index 0496d3ff2683bed46593aab37eb3a8a230ab42f5..395e69c9a96e63daea257add8befa40acdc80640 100644 (file)
@@ -5,7 +5,10 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-               exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+               exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+               exynos_drm_plane.o
 
 obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
 obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+                                exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644 (file)
index 0000000..84b614f
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+                       const struct i2c_device_id *dev_id)
+{
+       hdmi_attach_ddc_client(client);
+
+       dev_info(&client->adapter->dev, "attached s5p_ddc "
+               "into i2c adapter successfully\n");
+
+       return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+       dev_info(&client->adapter->dev, "detached s5p_ddc "
+               "from i2c adapter successfully\n");
+
+       return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+       {"s5p_ddc", 0},
+       { },
+};
+
+struct i2c_driver ddc_driver = {
+       .driver = {
+               .name = "s5p_ddc",
+               .owner = THIS_MODULE,
+       },
+       .id_table       = ddc_idtable,
+       .probe          = s5p_ddc_probe,
+       .remove         = __devexit_p(s5p_ddc_remove),
+       .command                = NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
index 2bb07bca511a12b6949892186565cfde960db8ca..3cf785c58186da818eb1757584ef8e00e14efa38 100644 (file)
@@ -73,7 +73,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
 
        buffer->size = size;
@@ -84,8 +84,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
         */
        if (lowlevel_buffer_allocate(dev, buffer) < 0) {
                kfree(buffer);
-               buffer = NULL;
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
 
        return buffer;
index 6e91f9caa5dbdfe12aade5461c3ad253888f20bf..c913f2bad760f6235f86e2953b3c6d131daae243 100644 (file)
@@ -30,9 +30,6 @@
 struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size);
 
-/* get memory information of a drm framebuffer. */
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
                struct exynos_drm_gem_buf *buffer);
index ee43cc22085304f7f07f267cd63a7d2be05d249a..e3861ac492950b97773ceacbdc86557c2a914e08 100644 (file)
@@ -34,7 +34,6 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define to_exynos_crtc(x)      container_of(x, struct exynos_drm_crtc,\
                                drm_crtc)
  *     drm framework doesn't support multiple irq yet.
  *     we can refer to the crtc to current hardware interrupt occured through
  *     this pipe value.
+ * @dpms: store the crtc dpms value
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 drm_crtc;
        struct exynos_drm_overlay       overlay;
        unsigned int                    pipe;
+       unsigned int                    dpms;
 };
 
 static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -78,19 +79,23 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
        struct exynos_drm_gem_buf *buffer;
        unsigned int actual_w;
        unsigned int actual_h;
+       int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+       int i;
+
+       for (i = 0; i < nr; i++) {
+               buffer = exynos_drm_fb_buffer(fb, i);
+               if (!buffer) {
+                       DRM_LOG_KMS("buffer is null\n");
+                       return -EFAULT;
+               }
 
-       buffer = exynos_drm_fb_get_buf(fb);
-       if (!buffer) {
-               DRM_LOG_KMS("buffer is null.\n");
-               return -EFAULT;
-       }
-
-       overlay->dma_addr = buffer->dma_addr;
-       overlay->vaddr = buffer->kvaddr;
+               overlay->dma_addr[i] = buffer->dma_addr;
+               overlay->vaddr[i] = buffer->kvaddr;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                       (unsigned long)overlay->vaddr,
-                       (unsigned long)overlay->dma_addr);
+               DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+                               i, (unsigned long)overlay->vaddr[i],
+                               (unsigned long)overlay->dma_addr[i]);
+       }
 
        actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
        actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -101,7 +106,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
        overlay->fb_width = fb->width;
        overlay->fb_height = fb->height;
        overlay->bpp = fb->bits_per_pixel;
-       overlay->pitch = fb->pitch;
+       overlay->pitch = fb->pitches[0];
+       overlay->pixel_format = fb->pixel_format;
 
        /* set overlay range to be displayed. */
        overlay->crtc_x = pos->crtc_x;
@@ -153,26 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
+       struct drm_device *dev = crtc->dev;
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
        DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
 
+       if (exynos_crtc->dpms == mode) {
+               DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+               return;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
-                               exynos_drm_encoder_crtc_commit);
+               exynos_drm_fn_encoder(crtc, &mode,
+                               exynos_drm_encoder_crtc_dpms);
+               exynos_crtc->dpms = mode;
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               /* TODO */
-               exynos_drm_fn_encoder(crtc, NULL,
-                               exynos_drm_encoder_crtc_disable);
+               exynos_drm_fn_encoder(crtc, &mode,
+                               exynos_drm_encoder_crtc_dpms);
+               exynos_crtc->dpms = mode;
                break;
        default:
-               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               DRM_ERROR("unspecified mode %d\n", mode);
                break;
        }
+
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -188,6 +205,28 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       /*
+        * when set_crtc is requested from user or at booting time,
+        * crtc->commit would be called without dpms call so if dpms is
+        * no power on then crtc->dpms should be called
+        * with DRM_MODE_DPMS_ON for the hardware power to be on.
+        */
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+               int mode = DRM_MODE_DPMS_ON;
+
+               /*
+                * enable hardware(power on) to all encoders hdmi connected
+                * to current crtc.
+                */
+               exynos_drm_crtc_dpms(crtc, mode);
+               /*
+                * enable dma to all encoders connected to current crtc and
+                * lcd panel.
+                */
+               exynos_drm_fn_encoder(crtc, &mode,
+                                       exynos_drm_encoder_dpms_from_crtc);
+       }
+
        exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
                        exynos_drm_encoder_crtc_commit);
 }
@@ -344,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
        }
 
        exynos_crtc->pipe = nr;
+       exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+       exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
        crtc = &exynos_crtc->drm_crtc;
 
        private->crtc[nr] = crtc;
@@ -357,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_crtc *exynos_crtc =
+               to_exynos_crtc(private->crtc[crtc]);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+               return -EPERM;
+
        exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
                        exynos_drm_enable_vblank);
 
@@ -369,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_crtc *exynos_crtc =
+               to_exynos_crtc(private->crtc[crtc]);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+               return;
+
        exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
                        exynos_drm_disable_vblank);
 }
index 53e2216de61dd5c42c35eaa7f0c23ca1e991a621..35889ca255e93996f6cc37094a44147f756051f8 100644 (file)
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
 
-#define DRIVER_NAME    "exynos-drm"
+#define DRIVER_NAME    "exynos"
 #define DRIVER_DESC    "Samsung SoC DRM"
 #define DRIVER_DATE    "20110530"
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
+#define VBLANK_OFF_DELAY       50000
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -77,6 +80,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
                        goto err_crtc;
        }
 
+       for (nr = 0; nr < MAX_PLANE; nr++) {
+               ret = exynos_plane_init(dev, nr);
+               if (ret)
+                       goto err_crtc;
+       }
+
        ret = drm_vblank_init(dev, MAX_CRTC);
        if (ret)
                goto err_crtc;
@@ -100,6 +109,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
                goto err_drm_device;
        }
 
+       drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
        return 0;
 
 err_drm_device:
@@ -163,6 +174,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
                        exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .mmap           = exynos_drm_gem_mmap,
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .unlocked_ioctl = drm_ioctl,
+       .release        = drm_release,
 };
 
 static struct drm_driver exynos_drm_driver = {
@@ -182,15 +205,7 @@ static struct drm_driver exynos_drm_driver = {
        .dumb_map_offset        = exynos_drm_gem_dumb_map_offset,
        .dumb_destroy           = exynos_drm_gem_dumb_destroy,
        .ioctls                 = exynos_ioctls,
-       .fops = {
-               .owner          = THIS_MODULE,
-               .open           = drm_open,
-               .mmap           = exynos_drm_gem_mmap,
-               .poll           = drm_poll,
-               .read           = drm_read,
-               .unlocked_ioctl = drm_ioctl,
-               .release        = drm_release,
-       },
+       .fops                   = &exynos_drm_driver_fops,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
index 5e02e6ecc2e026955c9bf88bd5461bf893c1dbec..e685e1e33055fd21e08446da5215e368c46c02cc 100644 (file)
 #include "drm.h"
 
 #define MAX_CRTC       2
+#define MAX_PLANE      5
+#define MAX_FB_BUFFER  3
+#define DEFAULT_ZPOS   -1
 
 struct drm_device;
 struct exynos_drm_overlay;
 struct drm_connector;
 
+extern unsigned int drm_vblank_offdelay;
+
 /* this enumerates display type. */
 enum exynos_drm_output_type {
        EXYNOS_DISPLAY_TYPE_NONE,
@@ -57,8 +62,8 @@ enum exynos_drm_output_type {
 struct exynos_drm_overlay_ops {
        void (*mode_set)(struct device *subdrv_dev,
                         struct exynos_drm_overlay *overlay);
-       void (*commit)(struct device *subdrv_dev);
-       void (*disable)(struct device *subdrv_dev);
+       void (*commit)(struct device *subdrv_dev, int zpos);
+       void (*disable)(struct device *subdrv_dev, int zpos);
 };
 
 /*
@@ -80,9 +85,11 @@ struct exynos_drm_overlay_ops {
  * @scan_flag: interlace or progressive way.
  *     (it could be DRM_MODE_FLAG_*)
  * @bpp: pixel size.(in bit)
- * @dma_addr: bus(accessed by dma) address to the memory region allocated
- *     for a overlay.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus(accessed by dma) address to the memory region
+ *           allocated for a overlay.
+ * @vaddr: array of virtual memory addresss to this overlay.
+ * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
  * @index_color: if using color key feature then this value would be used
@@ -109,8 +116,10 @@ struct exynos_drm_overlay {
        unsigned int scan_flag;
        unsigned int bpp;
        unsigned int pitch;
-       dma_addr_t dma_addr;
-       void __iomem *vaddr;
+       uint32_t pixel_format;
+       dma_addr_t dma_addr[MAX_FB_BUFFER];
+       void __iomem *vaddr[MAX_FB_BUFFER];
+       int zpos;
 
        bool default_win;
        bool color_key;
@@ -144,17 +153,19 @@ struct exynos_drm_display_ops {
 /*
  * Exynos drm manager ops
  *
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
  * @mode_set: convert drm_display_mode to hw specific display mode and
  *           would be called by encoder->mode_set().
  * @commit: set current hw specific display mode to hw.
- * @disable: disable hardware specific display mode.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  */
 struct exynos_drm_manager_ops {
+       void (*dpms)(struct device *subdrv_dev, int mode);
+       void (*apply)(struct device *subdrv_dev);
        void (*mode_set)(struct device *subdrv_dev, void *mode);
        void (*commit)(struct device *subdrv_dev);
-       void (*disable)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
 };
index 153061415bafba2d3c83d155a7e86a4901db0b52..86b93dde219a51a3a4e224fa800841ab76dcc92b 100644 (file)
  * @drm_encoder: encoder object.
  * @manager: specific encoder has its own manager to control a hardware
  *     appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
  */
 struct exynos_drm_encoder {
        struct drm_encoder              drm_encoder;
        struct exynos_drm_manager       *manager;
+       int dpms;
 };
 
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       struct exynos_drm_display_ops *display_ops =
+                                                       manager->display_ops;
+
+                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+                                       connector->base.id, mode);
+                       if (display_ops && display_ops->power_on)
+                               display_ops->power_on(manager->dev, mode);
+               }
+       }
+}
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
        struct exynos_drm_manager_ops *manager_ops = manager->ops;
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
 
        DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
 
+       if (exynos_encoder->dpms == mode) {
+               DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+               return;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               if (manager_ops && manager_ops->commit)
-                       manager_ops->commit(manager->dev);
+               if (manager_ops && manager_ops->apply)
+                       manager_ops->apply(manager->dev);
+               exynos_drm_display_power(encoder, mode);
+               exynos_encoder->dpms = mode;
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               /* TODO */
-               if (manager_ops && manager_ops->disable)
-                       manager_ops->disable(manager->dev);
+               exynos_drm_display_power(encoder, mode);
+               exynos_encoder->dpms = mode;
                break;
        default:
                DRM_ERROR("unspecified mode %d\n", mode);
                break;
        }
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       struct exynos_drm_display_ops *display_ops =
-                                                       manager->display_ops;
-
-                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
-                                       connector->base.id, mode);
-                       if (display_ops && display_ops->power_on)
-                               display_ops->power_on(manager->dev, mode);
-               }
-       }
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static bool
@@ -169,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
        exynos_encoder->manager->pipe = -1;
 
        drm_encoder_cleanup(encoder);
-       encoder->dev->mode_config.num_encoder--;
        kfree(exynos_encoder);
 }
 
@@ -199,6 +217,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
                return NULL;
        }
 
+       exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
        exynos_encoder->manager = manager;
        encoder = &exynos_encoder->drm_encoder;
        encoder->possible_crtcs = possible_crtcs;
@@ -275,12 +294,27 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
                manager_ops->disable_vblank(manager->dev);
 }
 
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+                                         void *data)
 {
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int zpos = DEFAULT_ZPOS;
+
+       if (data)
+               zpos = *(int *)data;
+
+       if (overlay_ops && overlay_ops->commit)
+               overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_manager *manager =
+               to_exynos_encoder(encoder)->manager;
        int crtc = *(int *)data;
+       int zpos = DEFAULT_ZPOS;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -290,8 +324,53 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
         */
        manager->pipe = crtc;
 
-       if (overlay_ops && overlay_ops->commit)
-               overlay_ops->commit(manager->dev);
+       exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+       int mode = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       exynos_drm_encoder_dpms(encoder, mode);
+
+       exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+       struct drm_device *dev = encoder->dev;
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+       struct exynos_drm_manager *manager = exynos_encoder->manager;
+       struct exynos_drm_manager_ops *manager_ops = manager->ops;
+       struct drm_connector *connector;
+       int mode = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (manager_ops && manager_ops->dpms)
+               manager_ops->dpms(manager->dev, mode);
+
+       /*
+        * set current dpms mode to the connector connected to
+        * current encoder. connector->dpms would be checked
+        * at drm_helper_connector_dpms()
+        */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder == encoder)
+                       connector->dpms = mode;
+
+       /*
+        * if this condition is ok then it means that the crtc is already
+        * detached from encoder and last function for detaching is properly
+        * done, so clear pipe from manager to prevent repeated call.
+        */
+       if (mode > DRM_MODE_DPMS_ON) {
+               if (!encoder->crtc)
+                       manager->pipe = -1;
+       }
 }
 
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -310,19 +389,15 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int zpos = DEFAULT_ZPOS;
 
        DRM_DEBUG_KMS("\n");
 
-       if (overlay_ops && overlay_ops->disable)
-               overlay_ops->disable(manager->dev);
+       if (data)
+               zpos = *(int *)data;
 
-       /*
-        * crtc is already detached from encoder and last
-        * function for detaching is properly done, so
-        * clear pipe from manager to prevent repeated call
-        */
-       if (!encoder->crtc)
-               manager->pipe = -1;
+       if (overlay_ops && overlay_ops->disable)
+               overlay_ops->disable(manager->dev, zpos);
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
index a22acfbf0e4ed6dc31894d03f7a5369158ded8a2..97b087a51cb6033a473716b687149c2e063e13e1 100644 (file)
@@ -39,7 +39,12 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
                            void (*fn)(struct drm_encoder *, void *));
 void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+                                         void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+                                       void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
 
index 5bf4a1ac7f828cd26509857e5ed9b024f094f831..3733fe6723d32cee58091135dad7abfd28672843 100644 (file)
@@ -33,7 +33,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
 
 #define to_exynos_fb(x)        container_of(x, struct exynos_drm_fb, fb)
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer obejct.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @buffer: pointer to exynos_drm_gem_buffer object.
- *     - contain the memory information to memory region allocated
- *     at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
  */
 struct exynos_drm_fb {
        struct drm_framebuffer          fb;
-       struct exynos_drm_gem_obj       *exynos_gem_obj;
-       struct exynos_drm_gem_buf       *buffer;
+       struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };
 
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -61,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
        drm_framebuffer_cleanup(fb);
 
-       /*
-        * default framebuffer has no gem object so
-        * a buffer of the default framebuffer should be released at here.
-        */
-       if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
-               exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
-
        kfree(exynos_fb);
        exynos_fb = NULL;
 }
@@ -81,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        return drm_gem_handle_create(file_priv,
-                       &exynos_fb->exynos_gem_obj->base, handle);
+                       &exynos_fb->exynos_gem_obj[0]->base, handle);
 }
 
 static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -102,134 +90,88 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
        .dirty          = exynos_drm_fb_dirty,
 };
 
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
-                   struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *mode_cmd,
+                           struct drm_gem_object *obj)
 {
        struct exynos_drm_fb *exynos_fb;
-       struct drm_framebuffer *fb;
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
-       struct drm_gem_object *obj;
-       unsigned int size;
        int ret;
 
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       mode_cmd->pitch = max(mode_cmd->pitch,
-                       mode_cmd->width * (mode_cmd->bpp >> 3));
-
-       DRM_LOG_KMS("drm fb create(%dx%d)\n",
-                       mode_cmd->width, mode_cmd->height);
-
        exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
        if (!exynos_fb) {
-               DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+               DRM_ERROR("failed to allocate exynos drm framebuffer\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       fb = &exynos_fb->fb;
-       ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
        if (ret) {
-               DRM_ERROR("failed to initialize framebuffer.\n");
-               goto err_init;
+               DRM_ERROR("failed to initialize framebuffer\n");
+               return ERR_PTR(ret);
        }
 
-       DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
-       size = mode_cmd->pitch * mode_cmd->height;
+       return &exynos_fb->fb;
+}
 
-       /*
-        * mode_cmd->handle could be NULL at booting time or
-        * with user request. if NULL, a new buffer or a gem object
-        * would be allocated.
-        */
-       if (!mode_cmd->handle) {
-               if (!file_priv) {
-                       struct exynos_drm_gem_buf *buffer;
-
-                       /*
-                        * in case that file_priv is NULL, it allocates
-                        * only buffer and this buffer would be used
-                        * for default framebuffer.
-                        */
-                       buffer = exynos_drm_buf_create(dev, size);
-                       if (IS_ERR(buffer)) {
-                               ret = PTR_ERR(buffer);
-                               goto err_buffer;
-                       }
-
-                       exynos_fb->buffer = buffer;
-
-                       DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
-                                       (unsigned long)buffer->dma_addr, size);
-
-                       goto out;
-               } else {
-                       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-                                                       &mode_cmd->handle,
-                                                       size);
-                       if (IS_ERR(exynos_gem_obj)) {
-                               ret = PTR_ERR(exynos_gem_obj);
-                               goto err_buffer;
-                       }
-               }
-       } else {
-               obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-               if (!obj) {
-                       DRM_ERROR("failed to lookup gem object.\n");
-                       goto err_buffer;
-               }
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+                     struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct drm_framebuffer *fb;
+       struct exynos_drm_fb *exynos_fb;
+       int nr;
+       int i;
 
-               exynos_gem_obj = to_exynos_gem_obj(obj);
+       DRM_DEBUG_KMS("%s\n", __FILE__);
 
-               drm_gem_object_unreference_unlocked(obj);
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object\n");
+               return ERR_PTR(-ENOENT);
        }
 
-       /*
-        * if got a exynos_gem_obj from either a handle or
-        * a new creation then exynos_fb->exynos_gem_obj is NULL
-        * so that default framebuffer has no its own gem object,
-        * only its own buffer object.
-        */
-       exynos_fb->buffer = exynos_gem_obj->buffer;
-
-       DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-                       (unsigned long)exynos_fb->buffer->dma_addr, size,
-                       (unsigned int)&exynos_gem_obj->base);
+       drm_gem_object_unreference_unlocked(obj);
 
-out:
-       exynos_fb->exynos_gem_obj = exynos_gem_obj;
+       fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+       if (IS_ERR(fb))
+               return fb;
 
-       drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+       exynos_fb = to_exynos_fb(fb);
+       nr = exynos_drm_format_num_buffers(fb->pixel_format);
 
-       return fb;
-
-err_buffer:
-       drm_framebuffer_cleanup(fb);
-
-err_init:
-       kfree(exynos_fb);
+       for (i = 1; i < nr; i++) {
+               obj = drm_gem_object_lookup(dev, file_priv,
+                               mode_cmd->handles[i]);
+               if (!obj) {
+                       DRM_ERROR("failed to lookup gem object\n");
+                       exynos_drm_fb_destroy(fb);
+                       return ERR_PTR(-ENOENT);
+               }
 
-       return ERR_PTR(ret);
-}
+               drm_gem_object_unreference_unlocked(obj);
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-                                             struct drm_file *file_priv,
-                                             struct drm_mode_fb_cmd *mode_cmd)
-{
-       DRM_DEBUG_KMS("%s\n", __FILE__);
+               exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+       }
 
-       return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+       return fb;
 }
 
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+                                               int index)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
        struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       buffer = exynos_fb->buffer;
+       if (index >= MAX_FB_BUFFER)
+               return NULL;
+
+       buffer = exynos_fb->exynos_gem_obj[index]->buffer;
        if (!buffer)
                return NULL;
 
@@ -250,7 +192,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
 }
 
 static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
-       .fb_create = exynos_drm_fb_create,
+       .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
 };
 
index eb35931d302cd44115e1b86705ce8dd96818d68a..3ecb30d93552dbae84a7c555af2fda02ff754a1d 100644 (file)
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-                                             struct drm_file *filp,
-                                             struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12MT:
+               return 2;
+       case DRM_FORMAT_YUV420M:
+               return 3;
+       default:
+               return 1;
+       }
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *mode_cmd,
+                           struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+                                                int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
index 836f4100818710e830229b985591bfc85a047f03..d7ae29d2f3d68b0668f0979ade06e244e7ddf07c 100644 (file)
@@ -34,7 +34,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define MAX_CONNECTOR          4
 #define PREFERRED_BPP          32
@@ -43,8 +42,8 @@
                                drm_fb_helper)
 
 struct exynos_drm_fbdev {
-       struct drm_fb_helper    drm_fb_helper;
-       struct drm_framebuffer  *fb;
+       struct drm_fb_helper            drm_fb_helper;
+       struct exynos_drm_gem_obj       *exynos_gem_obj;
 };
 
 static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -90,26 +89,24 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 {
        struct fb_info *fbi = helper->fbdev;
        struct drm_device *dev = helper->dev;
-       struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
        struct exynos_drm_gem_buf *buffer;
        unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_fb->fb = fb;
-
-       drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
 
-       buffer = exynos_drm_fb_get_buf(fb);
+       /* RGB formats use only one buffer */
+       buffer = exynos_drm_fb_buffer(fb, 0);
        if (!buffer) {
                DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
        offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
-       offset += fbi->var.yoffset * fb->pitch;
+       offset += fbi->var.yoffset * fb->pitches[0];
 
        dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
        fbi->screen_base = buffer->kvaddr + offset;
@@ -124,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                                    struct drm_fb_helper_surface_size *sizes)
 {
        struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_device *dev = helper->dev;
        struct fb_info *fbi;
-       struct drm_mode_fb_cmd mode_cmd = { 0 };
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct platform_device *pdev = dev->platformdev;
+       unsigned long size;
        int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -138,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -150,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-       if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       exynos_gem_obj = exynos_drm_gem_create(dev, size);
+       if (IS_ERR(exynos_gem_obj)) {
+               ret = PTR_ERR(exynos_gem_obj);
+               goto out;
+       }
+
+       exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+                       &exynos_gem_obj->base);
+       if (IS_ERR_OR_NULL(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
-               ret = PTR_ERR(exynos_fbdev->fb);
+               ret = PTR_ERR(helper->fb);
                goto out;
        }
 
-       helper->fb = exynos_fbdev->fb;
        helper->fbdev = fbi;
 
        fbi->par = helper;
@@ -171,8 +180,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        }
 
        ret = exynos_drm_fbdev_update(helper, helper->fb);
-       if (ret < 0)
+       if (ret < 0) {
                fb_dealloc_cmap(&fbi->cmap);
+               goto out;
+       }
 
 /*
  * if failed, all resources allocated above would be released by
@@ -205,34 +216,42 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
 {
        struct drm_device *dev = helper->dev;
        struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-       struct drm_framebuffer *fb = exynos_fbdev->fb;
-       struct drm_mode_fb_cmd mode_cmd = { 0 };
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct drm_framebuffer *fb = helper->fb;
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       unsigned long size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (helper->fb != fb) {
-               DRM_ERROR("drm framebuffer is different\n");
-               return -EINVAL;
-       }
-
        if (exynos_drm_fbdev_is_samefb(fb, sizes))
                return 0;
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+
+       if (exynos_fbdev->exynos_gem_obj)
+               exynos_drm_gem_destroy(exynos_fbdev->exynos_gem_obj);
 
        if (fb->funcs->destroy)
                fb->funcs->destroy(fb);
 
-       exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-       if (IS_ERR(exynos_fbdev->fb)) {
-               DRM_ERROR("failed to allocate fb.\n");
-               return PTR_ERR(exynos_fbdev->fb);
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       exynos_gem_obj = exynos_drm_gem_create(dev, size);
+       if (IS_ERR(exynos_gem_obj))
+               return PTR_ERR(exynos_gem_obj);
+
+       exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+                       &exynos_gem_obj->base);
+       if (IS_ERR_OR_NULL(helper->fb)) {
+               DRM_ERROR("failed to create drm framebuffer.\n");
+               return PTR_ERR(helper->fb);
        }
 
-       helper->fb = exynos_fbdev->fb;
        return exynos_drm_fbdev_update(helper, helper->fb);
 }
 
@@ -366,6 +385,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
 
        fbdev = to_exynos_fbdev(private->fb_helper);
 
+       if (fbdev->exynos_gem_obj)
+               exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
        exynos_drm_fbdev_destroy(dev, private->fb_helper);
        kfree(fbdev);
        private->fb_helper = NULL;
index db3b3d9e731d86475d734884e2ec113a8be5f914..ca83139cd30997fcb2eb2d0df791e6bbea9df8ba 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/exynos_drm.h>
 #include <plat/regs-fb-v4.h>
@@ -68,6 +69,7 @@ struct fimd_win_data {
        void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
+       bool                    enabled;
 };
 
 struct fimd_context {
@@ -84,6 +86,8 @@ struct fimd_context {
        unsigned long                   irq_flags;
        u32                             vidcon0;
        u32                             vidcon1;
+       bool                            suspended;
+       struct mutex                    lock;
 
        struct fb_videomode             *timing;
 };
@@ -119,7 +123,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* TODO. */
+       /* TODO */
 
        return 0;
 }
@@ -132,12 +136,68 @@ static struct exynos_drm_display_ops fimd_display_ops = {
        .power_on = fimd_display_power_on,
 };
 
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+       struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+       mutex_lock(&ctx->lock);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               /*
+                * enable fimd hardware only if suspended status.
+                *
+                * P.S. fimd_dpms function would be called at booting time so
+                * clk_enable could be called double time.
+                */
+               if (ctx->suspended)
+                       pm_runtime_get_sync(subdrv_dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               pm_runtime_put_sync(subdrv_dev);
+               break;
+       default:
+               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               break;
+       }
+
+       mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+       struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+       struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+       struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+       struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+       struct fimd_win_data *win_data;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+                       ovl_ops->commit(subdrv_dev, i);
+       }
+
+       if (mgr_ops && mgr_ops->commit)
+               mgr_ops->commit(subdrv_dev);
+}
+
 static void fimd_commit(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fb_videomode *timing = ctx->timing;
        u32 val;
 
+       if (ctx->suspended)
+               return;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        /* setup polarity values from machine code. */
@@ -177,40 +237,6 @@ static void fimd_commit(struct device *dev)
        writel(val, ctx->regs + VIDCON0);
 }
 
-static void fimd_disable(struct device *dev)
-{
-       struct fimd_context *ctx = get_fimd_context(dev);
-       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-       struct drm_device *drm_dev = subdrv->drm_dev;
-       struct exynos_drm_manager *manager = &subdrv->manager;
-       u32 val;
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       /* fimd dma off */
-       val = readl(ctx->regs + VIDCON0);
-       val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
-       writel(val, ctx->regs + VIDCON0);
-
-       /*
-        * if vblank is enabled status with dma off then
-        * it disables vsync interrupt.
-        */
-       if (drm_dev->vblank_enabled[manager->pipe] &&
-               atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
-               drm_vblank_put(drm_dev, manager->pipe);
-
-               /*
-                * if vblank_disable_allowed is 0 then disable
-                * vsync interrupt right now else the vsync interrupt
-                * would be disabled by drm timer once a current process
-                * gives up ownershop of vblank event.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, manager->pipe);
-       }
-}
-
 static int fimd_enable_vblank(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
@@ -218,6 +244,9 @@ static int fimd_enable_vblank(struct device *dev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return -EPERM;
+
        if (!test_and_set_bit(0, &ctx->irq_flags)) {
                val = readl(ctx->regs + VIDINTCON0);
 
@@ -242,6 +271,9 @@ static void fimd_disable_vblank(struct device *dev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return;
+
        if (test_and_clear_bit(0, &ctx->irq_flags)) {
                val = readl(ctx->regs + VIDINTCON0);
 
@@ -253,8 +285,9 @@ static void fimd_disable_vblank(struct device *dev)
 }
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
+       .dpms = fimd_dpms,
+       .apply = fimd_apply,
        .commit = fimd_commit,
-       .disable = fimd_disable,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
 };
@@ -264,6 +297,7 @@ static void fimd_win_mode_set(struct device *dev,
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fimd_win_data *win_data;
+       int win;
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -273,12 +307,19 @@ static void fimd_win_mode_set(struct device *dev,
                return;
        }
 
+       win = overlay->zpos;
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
+       if (win < 0 || win > WINDOWS_NR)
+               return;
+
        offset = overlay->fb_x * (overlay->bpp >> 3);
        offset += overlay->fb_y * overlay->pitch;
 
        DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
 
-       win_data = &ctx->win_data[ctx->default_win];
+       win_data = &ctx->win_data[win];
 
        win_data->offset_x = overlay->crtc_x;
        win_data->offset_y = overlay->crtc_y;
@@ -286,8 +327,8 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->ovl_height = overlay->crtc_height;
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
-       win_data->dma_addr = overlay->dma_addr + offset;
-       win_data->vaddr = overlay->vaddr + offset;
+       win_data->dma_addr = overlay->dma_addr[0] + offset;
+       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -381,15 +422,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
        writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
 }
 
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fimd_win_data *win_data;
-       int win = ctx->default_win;
+       int win = zpos;
        unsigned long val, alpha, size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return;
+
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
        if (win < 0 || win > WINDOWS_NR)
                return;
 
@@ -472,24 +519,37 @@ static void fimd_win_commit(struct device *dev)
        if (win != 0)
                fimd_win_set_colkey(dev, win);
 
+       /* wincon */
+       val = readl(ctx->regs + WINCON(win));
+       val |= WINCONx_ENWIN;
+       writel(val, ctx->regs + WINCON(win));
+
        /* Enable DMA channel and unprotect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_CHx_ENABLE(win);
        val &= ~SHADOWCON_WINx_PROTECT(win);
        writel(val, ctx->regs + SHADOWCON);
+
+       win_data->enabled = true;
 }
 
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
-       int win = ctx->default_win;
+       struct fimd_win_data *win_data;
+       int win = zpos;
        u32 val;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
        if (win < 0 || win > WINDOWS_NR)
                return;
 
+       win_data = &ctx->win_data[win];
+
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -505,6 +565,8 @@ static void fimd_win_disable(struct device *dev)
        val &= ~SHADOWCON_CHx_ENABLE(win);
        val &= ~SHADOWCON_WINx_PROTECT(win);
        writel(val, ctx->regs + SHADOWCON);
+
+       win_data->enabled = false;
 }
 
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -540,9 +602,17 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
                wake_up_interruptible(&e->base.file_priv->event_wait);
        }
 
-       if (is_checked)
+       if (is_checked) {
                drm_vblank_put(drm_dev, crtc);
 
+               /*
+                * don't off vblank if vblank_disable_allowed is 1,
+                * because vblank would be off by timer handler.
+                */
+               if (!drm_dev->vblank_disable_allowed)
+                       drm_vblank_off(drm_dev, crtc);
+       }
+
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -560,19 +630,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
                /* VSYNC interrupt */
                writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
 
-       /*
-        * in case that vblank_disable_allowed is 1, it could induce
-        * the problem that manager->pipe could be -1 because with
-        * disable callback, vsync interrupt isn't disabled and at this moment,
-        * vsync interrupt could occur. the vsync interrupt would be disabled
-        * by timer handler later.
-        */
-       if (manager->pipe == -1)
-               return IRQ_HANDLED;
+       /* check the crtc is detached already from encoder */
+       if (manager->pipe < 0)
+               goto out;
 
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
+out:
        return IRQ_HANDLED;
 }
 
@@ -590,6 +655,13 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->irq_enabled = 1;
 
+       /*
+        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+        * by drm timer once a current process gives up ownership of
+        * vblank event.(after drm_vblank_put function is called)
+        */
+       drm_dev->vblank_disable_allowed = 1;
+
        return 0;
 }
 
@@ -739,9 +811,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 
        ctx->irq = res->start;
 
-       for (win = 0; win < WINDOWS_NR; win++)
-               fimd_clear_win(ctx, win);
-
        ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
        if (ret < 0) {
                dev_err(dev, "irq request failed.\n");
@@ -769,7 +838,17 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        subdrv->manager.display_ops = &fimd_display_ops;
        subdrv->manager.dev = dev;
 
+       mutex_init(&ctx->lock);
+
        platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
+
+       for (win = 0; win < WINDOWS_NR; win++)
+               fimd_clear_win(ctx, win);
+
        exynos_drm_subdrv_register(subdrv);
 
        return 0;
@@ -797,14 +876,25 @@ err_clk_get:
 
 static int __devexit fimd_remove(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct fimd_context *ctx = platform_get_drvdata(pdev);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        exynos_drm_subdrv_unregister(&ctx->subdrv);
 
+       if (ctx->suspended)
+               goto out;
+
        clk_disable(ctx->lcd_clk);
        clk_disable(ctx->bus_clk);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_put_sync(dev);
+
+out:
+       pm_runtime_disable(dev);
+
        clk_put(ctx->lcd_clk);
        clk_put(ctx->bus_clk);
 
@@ -818,12 +908,102 @@ static int __devexit fimd_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+       int ret;
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       ret = pm_runtime_suspend(dev);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+       int ret;
+
+       ret = pm_runtime_resume(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to resume runtime pm.\n");
+               return ret;
+       }
+
+       pm_runtime_disable(dev);
+
+       ret = pm_runtime_set_active(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to active runtime pm.\n");
+               pm_runtime_enable(dev);
+               pm_runtime_suspend(dev);
+               return ret;
+       }
+
+       pm_runtime_enable(dev);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       clk_disable(ctx->lcd_clk);
+       clk_disable(ctx->bus_clk);
+
+       ctx->suspended = true;
+       return 0;
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ret = clk_enable(ctx->bus_clk);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_enable(ctx->lcd_clk);
+       if  (ret < 0) {
+               clk_disable(ctx->bus_clk);
+               return ret;
+       }
+
+       ctx->suspended = false;
+
+       /* if vblank was enabled status, enable it again. */
+       if (test_and_clear_bit(0, &ctx->irq_flags))
+               fimd_enable_vblank(dev);
+
+       fimd_apply(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimd_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+       SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
 static struct platform_driver fimd_driver = {
        .probe          = fimd_probe,
        .remove         = __devexit_p(fimd_remove),
        .driver         = {
                .name   = "exynos4-fb",
                .owner  = THIS_MODULE,
+               .pm     = &fimd_pm_ops,
        },
 };
 
index aba0fe47f7eaae47c10ac4b0839eba8c1edf7f54..025abb3e3b67906948c1c68cf684333d915d9d75 100644 (file)
@@ -55,17 +55,54 @@ static unsigned int convert_to_vm_err_msg(int msg)
        return out_msg;
 }
 
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+                                       struct drm_file *file_priv,
+                                       unsigned int *handle)
 {
+       int ret;
+
+       /*
+        * allocate a id of idr table where the obj is registered
+        * and handle has the id what user can see.
+        */
+       ret = drm_gem_handle_create(file_priv, obj, handle);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+       /* drop reference from allocate - handle holds it now. */
+       drm_gem_object_unreference_unlocked(obj);
+
+       return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+       struct drm_gem_object *obj;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+       if (!exynos_gem_obj)
+               return;
+
+       obj = &exynos_gem_obj->base;
+
+       DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+       exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+       if (obj->map_list.map)
+               drm_gem_free_mmap_offset(obj);
+
+       /* release file pointer to gem object. */
+       drm_gem_object_release(obj);
+
+       kfree(exynos_gem_obj);
 }
 
-static struct exynos_drm_gem_obj
-               *exynos_drm_gem_init(struct drm_device *drm_dev,
-                       struct drm_file *file_priv, unsigned int *handle,
-                       unsigned int size)
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+                                                     unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
@@ -73,75 +110,41 @@ static struct exynos_drm_gem_obj
 
        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
-               DRM_ERROR("failed to allocate exynos gem object.\n");
-               return ERR_PTR(-ENOMEM);
+               DRM_ERROR("failed to allocate exynos gem object\n");
+               return NULL;
        }
 
        obj = &exynos_gem_obj->base;
 
-       ret = drm_gem_object_init(drm_dev, obj, size);
+       ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
-               DRM_ERROR("failed to initialize gem object.\n");
-               ret = -EINVAL;
-               goto err_object_init;
+               DRM_ERROR("failed to initialize gem object\n");
+               kfree(exynos_gem_obj);
+               return NULL;
        }
 
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret < 0) {
-               DRM_ERROR("failed to allocate mmap offset.\n");
-               goto err_create_mmap_offset;
-       }
-
-       /*
-        * allocate a id of idr table where the obj is registered
-        * and handle has the id what user can see.
-        */
-       ret = drm_gem_handle_create(file_priv, obj, handle);
-       if (ret)
-               goto err_handle_create;
-
-       DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
-       /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_unreference_unlocked(obj);
-
        return exynos_gem_obj;
-
-err_handle_create:
-       drm_gem_free_mmap_offset(obj);
-
-err_create_mmap_offset:
-       drm_gem_object_release(obj);
-
-err_object_init:
-       kfree(exynos_gem_obj);
-
-       return ERR_PTR(ret);
 }
 
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-                               struct drm_file *file_priv,
-                               unsigned int *handle, unsigned long size)
+                                                unsigned long size)
 {
-
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
        struct exynos_drm_gem_buf *buffer;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
 
        size = roundup(size, PAGE_SIZE);
-
        DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
 
        buffer = exynos_drm_buf_create(dev, size);
-       if (IS_ERR(buffer)) {
-               return ERR_CAST(buffer);
-       }
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
 
-       exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
-       if (IS_ERR(exynos_gem_obj)) {
+       exynos_gem_obj = exynos_drm_gem_init(dev, size);
+       if (!exynos_gem_obj) {
                exynos_drm_buf_destroy(dev, buffer);
-               return exynos_gem_obj;
+               return ERR_PTR(-ENOMEM);
        }
 
        exynos_gem_obj->buffer = buffer;
@@ -150,23 +153,30 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-                                       struct drm_file *file_priv)
+                               struct drm_file *file_priv)
 {
        struct drm_exynos_gem_create *args = data;
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-                                               &args->handle, args->size);
+       exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
+       ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+                       &args->handle);
+       if (ret) {
+               exynos_drm_gem_destroy(exynos_gem_obj);
+               return ret;
+       }
+
        return 0;
 }
 
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                                   struct drm_file *file_priv)
 {
        struct drm_exynos_gem_map_off *args = data;
 
@@ -185,7 +195,7 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 }
 
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
-               struct vm_area_struct *vma)
+                                     struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
@@ -196,6 +206,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
        vma->vm_flags |= (VM_IO | VM_RESERVED);
 
+       /* in case of direct mapping, always having non-cachable attribute */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_file = filp;
 
@@ -232,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
 };
 
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                             struct drm_file *file_priv)
 {
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
@@ -278,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj;
-
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       DRM_DEBUG_KMS("handle count = %d\n",
-                       atomic_read(&gem_obj->handle_count));
-
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
-
-       /* release file pointer to gem object. */
-       drm_gem_object_release(gem_obj);
-
-       exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
-       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
-
-       kfree(exynos_gem_obj);
+       exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
-               struct drm_device *dev, struct drm_mode_create_dumb *args)
+                              struct drm_device *dev,
+                              struct drm_mode_create_dumb *args)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -316,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * args->bpp >> 3;
        args->size = args->pitch * args->height;
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
-                                                       args->size);
+       exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
+       ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+                       &args->handle);
+       if (ret) {
+               exynos_drm_gem_destroy(exynos_gem_obj);
+               return ret;
+       }
+
        return 0;
 }
 
 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *dev, uint32_t handle, uint64_t *offset)
+                                  struct drm_device *dev, uint32_t handle,
+                                  uint64_t *offset)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
+       int ret = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -343,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto unlock;
        }
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
-
-       drm_gem_object_unreference(obj);
+       if (!exynos_gem_obj->base.map_list.map) {
+               ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+               if (ret)
+                       goto out;
+       }
 
+       *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
+out:
+       drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               unsigned int handle)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /*
+        * obj->refcount and obj->handle_count are decreased and
+        * if both them are 0 then exynos_drm_gem_free_object()
+        * would be called by callback to release resources.
+        */
+       ret = drm_gem_handle_delete(file_priv, handle);
+       if (ret < 0) {
+               DRM_ERROR("failed to delete drm_gem_handle.\n");
+               return ret;
+       }
 
        return 0;
 }
@@ -403,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return ret;
 }
 
-
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int handle)
-{
-       int ret;
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       /*
-        * obj->refcount and obj->handle_count are decreased and
-        * if both them are 0 then exynos_drm_gem_free_object()
-        * would be called by callback to release resources.
-        */
-       ret = drm_gem_handle_delete(file_priv, handle);
-       if (ret < 0) {
-               DRM_ERROR("failed to delete drm_gem_handle.\n");
-               return ret;
-       }
-
-       return 0;
-}
-
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
 MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
 MODULE_LICENSE("GPL");
index ef8797334e6da746c54eed761521aacdc7afd7a8..67cdc9168708b30eac9bd1af01f9cb552b23d46f 100644 (file)
@@ -60,14 +60,16 @@ struct exynos_drm_gem_buf {
  *     user can access the buffer through kms_bo.handle.
  */
 struct exynos_drm_gem_obj {
-       struct drm_gem_object base;
-       struct exynos_drm_gem_buf *buffer;
+       struct drm_gem_object           base;
+       struct exynos_drm_gem_buf       *buffer;
 };
 
-/* create a new buffer and get a new gem handle. */
+/* destroy a buffer with gem object */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-               struct drm_file *file_priv,
-               unsigned int *handle, unsigned long size);
+                                                unsigned long size);
 
 /*
  * request gem object creation and buffer allocation as the size
@@ -75,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
  * height and bpp.
  */
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+                               struct drm_file *file_priv);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+                                   struct drm_file *file_priv);
 
-/* unmap a buffer from user space. */
-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+/*
+ * mmap the physically continuous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
 
 /* initialize gem object. */
 int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -93,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
 
 /* create memory region for drm framebuffer. */
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
-               struct drm_device *dev, struct drm_mode_create_dumb *args);
+                              struct drm_device *dev,
+                              struct drm_mode_create_dumb *args);
 
 /* map memory region for drm framebuffer to user space. */
 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *dev, uint32_t handle, uint64_t *offset);
-
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
-
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+                                  struct drm_device *dev, uint32_t handle,
+                                  uint64_t *offset);
 
 /*
  * destroy memory region allocated.
@@ -118,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
  *     would be released by drm_gem_handle_delete().
  */
 int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int handle);
+                               struct drm_device *dev,
+                               unsigned int handle);
+
+/* page fault handler and mmap fault address(virtual) to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags and we can change the vm attribute to other one at here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644 (file)
index 0000000..ed8a319
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev)                platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev)         to_context(dev)
+#define get_ctx_from_subdrv(subdrv)    container_of(subdrv,\
+                                       struct drm_hdmi_context, subdrv);
+
+/* these callback points shoud be set by specific drivers. */
+static struct exynos_hdmi_display_ops *hdmi_display_ops;
+static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
+static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+
+struct drm_hdmi_context {
+       struct exynos_drm_subdrv        subdrv;
+       struct exynos_drm_hdmi_context  *hdmi_ctx;
+       struct exynos_drm_hdmi_context  *mixer_ctx;
+       struct work_struct              work;
+};
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+                                       *display_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (display_ops)
+               hdmi_display_ops = display_ops;
+}
+EXPORT_SYMBOL(exynos_drm_display_ops_register);
+
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+                                       *manager_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (manager_ops)
+               hdmi_manager_ops = manager_ops;
+}
+EXPORT_SYMBOL(exynos_drm_manager_ops_register);
+
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+                                       *overlay_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (overlay_ops)
+               hdmi_overlay_ops = overlay_ops;
+}
+EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->is_connected)
+               return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+       return false;
+}
+
+static int drm_hdmi_get_edid(struct device *dev,
+               struct drm_connector *connector, u8 *edid, int len)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->get_edid)
+               return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
+                               connector, edid, len);
+
+       return 0;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->check_timing)
+               return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
+                               timing);
+
+       return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->power_on)
+               return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+       return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+       .type = EXYNOS_DISPLAY_TYPE_HDMI,
+       .is_connected = drm_hdmi_is_connected,
+       .get_edid = drm_hdmi_get_edid,
+       .check_timing = drm_hdmi_check_timing,
+       .power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+       struct exynos_drm_manager *manager = &subdrv->manager;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
+               return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
+                                                       manager->pipe);
+
+       return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
+               return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
+               hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_manager_ops && hdmi_manager_ops->commit)
+               hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (hdmi_manager_ops && hdmi_manager_ops->disable)
+                       hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+               break;
+       default:
+               DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
+               break;
+       }
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+       .dpms = drm_hdmi_dpms,
+       .enable_vblank = drm_hdmi_enable_vblank,
+       .disable_vblank = drm_hdmi_disable_vblank,
+       .mode_set = drm_hdmi_mode_set,
+       .commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+               struct exynos_drm_overlay *overlay)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
+               hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
+               hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
+               hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+       .mode_set = drm_mixer_mode_set,
+       .commit = drm_mixer_commit,
+       .disable = drm_mixer_disable,
+};
+
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+               struct device *dev)
+{
+       struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+       struct drm_hdmi_context *ctx;
+       struct platform_device *pdev = to_platform_device(dev);
+       struct exynos_drm_common_hdmi_pd *pd;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       pd = pdev->dev.platform_data;
+
+       if (!pd) {
+               DRM_DEBUG_KMS("platform data is null.\n");
+               return -EFAULT;
+       }
+
+       if (!pd->hdmi_dev) {
+               DRM_DEBUG_KMS("hdmi device is null.\n");
+               return -EFAULT;
+       }
+
+       if (!pd->mixer_dev) {
+               DRM_DEBUG_KMS("mixer device is null.\n");
+               return -EFAULT;
+       }
+
+       ret = platform_driver_register(&hdmi_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register hdmi driver.\n");
+               return ret;
+       }
+
+       ret = platform_driver_register(&mixer_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register mixer driver.\n");
+               goto err_hdmidrv;
+       }
+
+       ctx = get_ctx_from_subdrv(subdrv);
+
+       ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
+                               to_context(pd->hdmi_dev);
+       if (!ctx->hdmi_ctx) {
+               DRM_DEBUG_KMS("hdmi context is null.\n");
+               ret = -EFAULT;
+               goto err_mixerdrv;
+       }
+
+       ctx->hdmi_ctx->drm_dev = drm_dev;
+
+       ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
+                               to_context(pd->mixer_dev);
+       if (!ctx->mixer_ctx) {
+               DRM_DEBUG_KMS("mixer context is null.\n");
+               ret = -EFAULT;
+               goto err_mixerdrv;
+       }
+
+       ctx->mixer_ctx->drm_dev = drm_dev;
+
+       return 0;
+
+err_mixerdrv:
+       platform_driver_unregister(&mixer_driver);
+err_hdmidrv:
+       platform_driver_unregister(&hdmi_driver);
+       return ret;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       platform_driver_unregister(&hdmi_driver);
+       platform_driver_unregister(&mixer_driver);
+}
+
+static void exynos_drm_hdmi_late_probe(struct work_struct *work)
+{
+       struct drm_hdmi_context *ctx = container_of(work,
+                               struct drm_hdmi_context, work);
+
+       /*
+        * this function calls subdrv->probe() so this must be called
+        * after probe context.
+        *
+        * PS. subdrv->probe() will call platform_driver_register() to probe
+        * hdmi and mixer driver.
+        */
+       exynos_drm_subdrv_register(&ctx->subdrv);
+}
+
+static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_subdrv *subdrv;
+       struct drm_hdmi_context *ctx;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       subdrv = &ctx->subdrv;
+
+       subdrv->probe = hdmi_subdrv_probe;
+       subdrv->remove = hdmi_subdrv_remove;
+       subdrv->manager.pipe = -1;
+       subdrv->manager.ops = &drm_hdmi_manager_ops;
+       subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
+       subdrv->manager.display_ops = &drm_hdmi_display_ops;
+       subdrv->manager.dev = dev;
+
+       platform_set_drvdata(pdev, subdrv);
+
+       INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
+
+       schedule_work(&ctx->work);
+
+       return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+       .runtime_suspend = hdmi_runtime_suspend,
+       .runtime_resume  = hdmi_runtime_resume,
+};
+
+static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+       struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       exynos_drm_subdrv_unregister(&ctx->subdrv);
+       kfree(ctx);
+
+       return 0;
+}
+
+static struct platform_driver exynos_drm_common_hdmi_driver = {
+       .probe          = exynos_drm_hdmi_probe,
+       .remove         = __devexit_p(exynos_drm_hdmi_remove),
+       .driver         = {
+               .name   = "exynos-drm-hdmi",
+               .owner  = THIS_MODULE,
+               .pm = &hdmi_pm_ops,
+       },
+};
+
+static int __init exynos_drm_hdmi_init(void)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
+               return ret;
+       }
+
+       return ret;
+}
+
+static void __exit exynos_drm_hdmi_exit(void)
+{
+       platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+}
+
+module_init(exynos_drm_hdmi_init);
+module_exit(exynos_drm_hdmi_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644 (file)
index 0000000..3c29f79
--- /dev/null
@@ -0,0 +1,73 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of specific device driver.
+ *     this context should be hdmi_context or mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+       struct drm_device       *drm_dev;
+       void                    *ctx;
+};
+
+struct exynos_hdmi_display_ops {
+       bool (*is_connected)(void *ctx);
+       int (*get_edid)(void *ctx, struct drm_connector *connector,
+                       u8 *edid, int len);
+       int (*check_timing)(void *ctx, void *timing);
+       int (*power_on)(void *ctx, int mode);
+};
+
+struct exynos_hdmi_manager_ops {
+       void (*mode_set)(void *ctx, void *mode);
+       void (*commit)(void *ctx);
+       void (*disable)(void *ctx);
+};
+
+struct exynos_hdmi_overlay_ops {
+       int (*enable_vblank)(void *ctx, int pipe);
+       void (*disable_vblank)(void *ctx);
+       void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+       void (*win_commit)(void *ctx, int zpos);
+       void (*win_disable)(void *ctx, int zpos);
+};
+
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+                                       *display_ops);
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+                                       *manager_ops);
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+                                       *overlay_ops);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644 (file)
index 0000000..bdcf770
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "exynos_drm.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+struct exynos_plane {
+       struct drm_plane                base;
+       struct exynos_drm_overlay       overlay;
+       bool                            enabled;
+};
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                    struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                    unsigned int crtc_w, unsigned int crtc_h,
+                    uint32_t src_x, uint32_t src_y,
+                    uint32_t src_w, uint32_t src_h)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+       struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+       struct exynos_drm_crtc_pos pos;
+       unsigned int x = src_x >> 16;
+       unsigned int y = src_y >> 16;
+       int ret;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+       pos.crtc_x = crtc_x;
+       pos.crtc_y = crtc_y;
+       pos.crtc_w = crtc_w;
+       pos.crtc_h = crtc_h;
+
+       pos.fb_x = x;
+       pos.fb_y = y;
+
+       /* TODO: scale feature */
+       ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+       if (ret < 0)
+               return ret;
+
+       exynos_drm_fn_encoder(crtc, overlay,
+                       exynos_drm_encoder_crtc_mode_set);
+       exynos_drm_fn_encoder(crtc, &overlay->zpos,
+                       exynos_drm_encoder_crtc_plane_commit);
+
+       exynos_plane->enabled = true;
+
+       return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+       struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!exynos_plane->enabled)
+               return 0;
+
+       exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+                       exynos_drm_encoder_crtc_disable);
+
+       exynos_plane->enabled = false;
+       exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+       return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       exynos_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(exynos_plane);
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+       .update_plane   = exynos_update_plane,
+       .disable_plane  = exynos_disable_plane,
+       .destroy        = exynos_plane_destroy,
+};
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+{
+       struct exynos_plane *exynos_plane;
+       uint32_t possible_crtcs;
+
+       exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+       if (!exynos_plane)
+               return -ENOMEM;
+
+       /* all CRTCs are available */
+       possible_crtcs = (1 << MAX_CRTC) - 1;
+
+       exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+       /* TODO: format */
+       return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+                             &exynos_plane_funcs, NULL, 0, false);
+}
+
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_exynos_plane_set_zpos *zpos_req = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct exynos_plane *exynos_plane;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
+               if (zpos_req->zpos != DEFAULT_ZPOS) {
+                       DRM_ERROR("zpos not within limits\n");
+                       return -EINVAL;
+               }
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, zpos_req->plane_id,
+                       DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             zpos_req->plane_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       plane = obj_to_plane(obj);
+       exynos_plane = container_of(plane, struct exynos_plane, base);
+
+       exynos_plane->overlay.zpos = zpos_req->zpos;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644 (file)
index 0000000..16b71f8
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr);
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644 (file)
index 0000000..f48f7ce
--- /dev/null
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#define HDMI_OVERLAY_NUMBER    3
+#define get_hdmi_context(dev)  platform_get_drvdata(to_platform_device(dev))
+
+static const u8 hdmiphy_conf27[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf27_027[32] = {
+       0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+       0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+       0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+       0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+       0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+       0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+struct hdmi_tg_regs {
+       u8 cmd;
+       u8 h_fsz_l;
+       u8 h_fsz_h;
+       u8 hact_st_l;
+       u8 hact_st_h;
+       u8 hact_sz_l;
+       u8 hact_sz_h;
+       u8 v_fsz_l;
+       u8 v_fsz_h;
+       u8 vsync_l;
+       u8 vsync_h;
+       u8 vsync2_l;
+       u8 vsync2_h;
+       u8 vact_st_l;
+       u8 vact_st_h;
+       u8 vact_sz_l;
+       u8 vact_sz_h;
+       u8 field_chg_l;
+       u8 field_chg_h;
+       u8 vact_st2_l;
+       u8 vact_st2_h;
+       u8 vsync_top_hdmi_l;
+       u8 vsync_top_hdmi_h;
+       u8 vsync_bot_hdmi_l;
+       u8 vsync_bot_hdmi_h;
+       u8 field_top_hdmi_l;
+       u8 field_top_hdmi_h;
+       u8 field_bot_hdmi_l;
+       u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+       u8 h_blank[2];
+       u8 v_blank[3];
+       u8 h_v_line[3];
+       u8 vsync_pol[1];
+       u8 int_pro_mode[1];
+       u8 v_blank_f[3];
+       u8 h_sync_gen[3];
+       u8 v_sync_gen1[3];
+       u8 v_sync_gen2[3];
+       u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+       struct hdmi_core_regs core;
+       struct hdmi_tg_regs tg;
+};
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+       .core = {
+               .h_blank = {0x8a, 0x00},
+               .v_blank = {0x0d, 0x6a, 0x01},
+               .h_v_line = {0x0d, 0xa2, 0x35},
+               .vsync_pol = {0x01},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00},
+               .h_sync_gen = {0x0e, 0x30, 0x11},
+               .v_sync_gen1 = {0x0f, 0x90, 0x00},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x5a, 0x03, /* h_fsz */
+               0x8a, 0x00, 0xd0, 0x02, /* hact */
+               0x0d, 0x02, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0xe0, 0x01, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+       .core = {
+               .h_blank = {0x72, 0x01},
+               .v_blank = {0xee, 0xf2, 0x00},
+               .h_v_line = {0xee, 0x22, 0x67},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x6c, 0x50, 0x02},
+               .v_sync_gen1 = {0x0a, 0x50, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x72, 0x06, /* h_fsz */
+               0x71, 0x01, 0x01, 0x05, /* hact */
+               0xee, 0x02, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x1e, 0x00, 0xd0, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+       .core = {
+               .h_blank = {0xd0, 0x02},
+               .v_blank = {0x32, 0xB2, 0x00},
+               .h_v_line = {0x65, 0x04, 0xa5},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x01},
+               .v_blank_f = {0x49, 0x2A, 0x23},
+               .h_sync_gen = {0x0E, 0xEA, 0x08},
+               .v_sync_gen1 = {0x07, 0x20, 0x00},
+               .v_sync_gen2 = {0x39, 0x42, 0x23},
+               .v_sync_gen3 = {0x38, 0x87, 0x73},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x50, 0x0A, /* h_fsz */
+               0xCF, 0x02, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x16, 0x00, 0x1c, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+       .core = {
+               .h_blank = {0xd0, 0x02},
+               .v_blank = {0x65, 0x6c, 0x01},
+               .h_v_line = {0x65, 0x04, 0xa5},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x0e, 0xea, 0x08},
+               .v_sync_gen1 = {0x09, 0x40, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x50, 0x0A, /* h_fsz */
+               0xCF, 0x02, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0x38, 0x04, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x48, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+       .core = {
+               .h_blank = {0x18, 0x01},
+               .v_blank = {0x32, 0xB2, 0x00},
+               .h_v_line = {0x65, 0x84, 0x89},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x01},
+               .v_blank_f = {0x49, 0x2A, 0x23},
+               .h_sync_gen = {0x56, 0x08, 0x02},
+               .v_sync_gen1 = {0x07, 0x20, 0x00},
+               .v_sync_gen2 = {0x39, 0x42, 0x23},
+               .v_sync_gen3 = {0xa4, 0x44, 0x4a},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x98, 0x08, /* h_fsz */
+               0x17, 0x01, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x16, 0x00, 0x1c, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+       .core = {
+               .h_blank = {0x18, 0x01},
+               .v_blank = {0x65, 0x6c, 0x01},
+               .h_v_line = {0x65, 0x84, 0x89},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x56, 0x08, 0x02},
+               .v_sync_gen1 = {0x09, 0x40, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x98, 0x08, /* h_fsz */
+               0x17, 0x01, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0x38, 0x04, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x48, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_conf hdmi_confs[] = {
+       { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
+       { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+       { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+       { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+       { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+};
+
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+       return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+                                u32 reg_id, u8 value)
+{
+       writeb(value, hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+                                u32 reg_id, u32 value, u32 mask)
+{
+       u32 old = readl(hdata->regs + reg_id);
+       value = (value & mask) | (old & ~mask);
+       writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+       DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+       readl(hdata->regs + reg_id))
+       DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_INTC_FLAG);
+       DUMPREG(HDMI_INTC_CON);
+       DUMPREG(HDMI_HPD_STATUS);
+       DUMPREG(HDMI_PHY_RSTOUT);
+       DUMPREG(HDMI_PHY_VPLL);
+       DUMPREG(HDMI_PHY_CMU);
+       DUMPREG(HDMI_CORE_RSTOUT);
+
+       DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_CON_0);
+       DUMPREG(HDMI_CON_1);
+       DUMPREG(HDMI_CON_2);
+       DUMPREG(HDMI_SYS_STATUS);
+       DUMPREG(HDMI_PHY_STATUS);
+       DUMPREG(HDMI_STATUS_EN);
+       DUMPREG(HDMI_HPD);
+       DUMPREG(HDMI_MODE_SEL);
+       DUMPREG(HDMI_HPD_GEN);
+       DUMPREG(HDMI_DC_CONTROL);
+       DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+       DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_H_BLANK_0);
+       DUMPREG(HDMI_H_BLANK_1);
+       DUMPREG(HDMI_V_BLANK_0);
+       DUMPREG(HDMI_V_BLANK_1);
+       DUMPREG(HDMI_V_BLANK_2);
+       DUMPREG(HDMI_H_V_LINE_0);
+       DUMPREG(HDMI_H_V_LINE_1);
+       DUMPREG(HDMI_H_V_LINE_2);
+       DUMPREG(HDMI_VSYNC_POL);
+       DUMPREG(HDMI_INT_PRO_MODE);
+       DUMPREG(HDMI_V_BLANK_F_0);
+       DUMPREG(HDMI_V_BLANK_F_1);
+       DUMPREG(HDMI_V_BLANK_F_2);
+       DUMPREG(HDMI_H_SYNC_GEN_0);
+       DUMPREG(HDMI_H_SYNC_GEN_1);
+       DUMPREG(HDMI_H_SYNC_GEN_2);
+       DUMPREG(HDMI_V_SYNC_GEN_1_0);
+       DUMPREG(HDMI_V_SYNC_GEN_1_1);
+       DUMPREG(HDMI_V_SYNC_GEN_1_2);
+       DUMPREG(HDMI_V_SYNC_GEN_2_0);
+       DUMPREG(HDMI_V_SYNC_GEN_2_1);
+       DUMPREG(HDMI_V_SYNC_GEN_2_2);
+       DUMPREG(HDMI_V_SYNC_GEN_3_0);
+       DUMPREG(HDMI_V_SYNC_GEN_3_1);
+       DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+       DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_TG_CMD);
+       DUMPREG(HDMI_TG_H_FSZ_L);
+       DUMPREG(HDMI_TG_H_FSZ_H);
+       DUMPREG(HDMI_TG_HACT_ST_L);
+       DUMPREG(HDMI_TG_HACT_ST_H);
+       DUMPREG(HDMI_TG_HACT_SZ_L);
+       DUMPREG(HDMI_TG_HACT_SZ_H);
+       DUMPREG(HDMI_TG_V_FSZ_L);
+       DUMPREG(HDMI_TG_V_FSZ_H);
+       DUMPREG(HDMI_TG_VSYNC_L);
+       DUMPREG(HDMI_TG_VSYNC_H);
+       DUMPREG(HDMI_TG_VSYNC2_L);
+       DUMPREG(HDMI_TG_VSYNC2_H);
+       DUMPREG(HDMI_TG_VACT_ST_L);
+       DUMPREG(HDMI_TG_VACT_ST_H);
+       DUMPREG(HDMI_TG_VACT_SZ_L);
+       DUMPREG(HDMI_TG_VACT_SZ_H);
+       DUMPREG(HDMI_TG_FIELD_CHG_L);
+       DUMPREG(HDMI_TG_FIELD_CHG_H);
+       DUMPREG(HDMI_TG_VACT_ST2_L);
+       DUMPREG(HDMI_TG_VACT_ST2_H);
+       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static int hdmi_conf_index(struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+               if (hdmi_confs[i].width == mode->hdisplay &&
+                               hdmi_confs[i].height == mode->vdisplay &&
+                               hdmi_confs[i].vrefresh == mode->vrefresh &&
+                               hdmi_confs[i].interlace ==
+                               ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+                                true : false))
+                       return i;
+
+       return -1;
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+       u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+
+       if (val)
+               return true;
+
+       return false;
+}
+
+static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
+                               u8 *edid, int len)
+{
+       struct edid *raw_edid;
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!hdata->ddc_port)
+               return -ENODEV;
+
+       raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+       if (raw_edid) {
+               memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
+                                       * EDID_LENGTH, len));
+               DRM_DEBUG_KMS("width[%d] x height[%d]\n",
+                               raw_edid->width_cm, raw_edid->height_cm);
+       } else {
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+       struct fb_videomode *check_timing = timing;
+       int i;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+                       check_timing->yres, check_timing->refresh,
+                       check_timing->vmode);
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+               if (hdmi_confs[i].width == check_timing->xres &&
+                       hdmi_confs[i].height == check_timing->yres &&
+                       hdmi_confs[i].vrefresh == check_timing->refresh &&
+                       hdmi_confs[i].interlace ==
+                       ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+                        true : false))
+                       return 0;
+
+       return -EINVAL;
+}
+
+static int hdmi_display_power_on(void *ctx, int mode)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               DRM_DEBUG_KMS("hdmi [on]\n");
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               break;
+       case DRM_MODE_DPMS_OFF:
+               DRM_DEBUG_KMS("hdmi [off]\n");
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_hdmi_display_ops display_ops = {
+       .is_connected   = hdmi_is_connected,
+       .get_edid       = hdmi_get_edid,
+       .check_timing   = hdmi_check_timing,
+       .power_on       = hdmi_display_power_on,
+};
+
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+       /* disable hpd handle for drm */
+       hdata->hpd_handle = false;
+
+       /* resetting HDMI core */
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT,  0, HDMI_CORE_SW_RSTOUT);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+       mdelay(10);
+
+       /* enable hpd handle for drm */
+       hdata->hpd_handle = true;
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+       /* disable hpd handle for drm */
+       hdata->hpd_handle = false;
+
+       /* enable HPD interrupts */
+       hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+               HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+               HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+       /* choose HDMI mode */
+       hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+               HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+       /* disable bluescreen */
+       hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+       /* choose bluescreen (fecal) color */
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
+       /* enable AVI packet every vsync, fixes purple line problem */
+       hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+       /* force RGB, look to CEA-861-D, table 7 for more detail */
+       hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
+       hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+       hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
+       hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+       hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+       /* enable hpd handle for drm */
+       hdata->hpd_handle = true;
+}
+
+static void hdmi_timing_apply(struct hdmi_context *hdata,
+                                const struct hdmi_preset_conf *conf)
+{
+       const struct hdmi_core_regs *core = &conf->core;
+       const struct hdmi_tg_regs *tg = &conf->tg;
+       int tries;
+
+       /* setting core registers */
+       hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+       hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+       hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+       /* Timing generator registers */
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+       /* waiting for HDMIPHY's PLL to get to steady state */
+       for (tries = 100; tries; --tries) {
+               u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+               if (val & HDMI_PHY_STATUS_READY)
+                       break;
+               mdelay(1);
+       }
+       /* steady state not achieved */
+       if (tries == 0) {
+               DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+               hdmi_regs_dump(hdata, "timing apply");
+       }
+
+       clk_disable(hdata->res.sclk_hdmi);
+       clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+       clk_enable(hdata->res.sclk_hdmi);
+
+       /* enable HDMI and timing generator */
+       hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+       if (core->int_pro_mode[0])
+               hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+                               HDMI_FIELD_EN);
+       else
+               hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+/*
+ * Reset the HDMIPHY via the register interface and put it back into its
+ * pre-configuration state.  sclk_hdmi is reparented to sclk_pixel first,
+ * since the PHY output clock is not usable while the PHY is in reset.
+ */
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+       u8 buffer[2];
+
+       /* switch sclk_hdmi away from the PHY clock while resetting */
+       clk_disable(hdata->res.sclk_hdmi);
+       clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+       clk_enable(hdata->res.sclk_hdmi);
+
+       /* operation mode */
+       buffer[0] = 0x1f;
+       buffer[1] = 0x00;
+
+       if (hdata->hdmiphy_port)
+               i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+       /* reset hdmiphy: assert then deassert SW reset, 10ms settle each */
+       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
+       mdelay(10);
+}
+
+/*
+ * Program the HDMIPHY over I2C with the 32-byte configuration for the
+ * currently selected mode (hdmi_confs[cur_conf]), then enable it and read
+ * the configuration back for debug logging.  Errors are logged but not
+ * propagated (void return).
+ */
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+       u8 buffer[32];
+       u8 operation[2];
+       u8 read_buffer[32] = {0, };
+       int ret;
+       int i;
+
+       if (!hdata->hdmiphy_port) {
+               DRM_ERROR("hdmiphy is not attached\n");
+               return;
+       }
+
+       /* pixel clock */
+       /* copy needed: i2c_master_send() takes a non-const buffer */
+       memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+       ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+       if (ret != 32) {
+               DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+               return;
+       }
+
+       mdelay(10);
+
+       /* operation mode */
+       operation[0] = 0x1f;
+       operation[1] = 0x80;
+
+       ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+       if (ret != 2) {
+               DRM_ERROR("failed to enable hdmiphy\n");
+               return;
+       }
+
+       /* read back and dump the PHY config for debugging */
+       ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+       if (ret < 0) {
+               DRM_ERROR("failed to read hdmiphy config\n");
+               return;
+       }
+
+       for (i = 0; i < ret; i++)
+               DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+                       "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
+/*
+ * Apply the full configuration for the current mode: reset and program the
+ * PHY, reset and re-init the HDMI core, then write the timing registers.
+ */
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+       const struct hdmi_preset_conf *conf =
+                 hdmi_confs[hdata->cur_conf].conf;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmiphy_conf_reset(hdata);
+       hdmiphy_conf_apply(hdata);
+
+       hdmi_conf_reset(hdata);
+       hdmi_conf_init(hdata);
+
+       /* setting core registers */
+       hdmi_timing_apply(hdata, conf);
+
+       hdmi_regs_dump(hdata, "start");
+}
+
+/*
+ * Manager-ops mode_set callback: remember the index of the preset config
+ * matching @mode.  The actual hardware programming is deferred to
+ * hdmi_commit().  Unsupported modes leave cur_conf unchanged.
+ */
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+       int conf_idx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       conf_idx = hdmi_conf_index(mode);
+       if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+               hdata->cur_conf = conf_idx;
+       else
+               DRM_DEBUG_KMS("not supported mode\n");
+}
+
+/*
+ * Manager-ops commit callback: program the hardware for the previously
+ * selected mode and mark the output enabled (consulted by hdmi_disable()).
+ */
+static void hdmi_commit(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_conf_apply(hdata);
+
+       hdata->enabled = true;
+}
+
+/*
+ * Manager-ops disable callback: put PHY and HDMI core back into reset if
+ * the output was enabled.  NOTE(review): 'enabled' is never cleared here,
+ * so a second disable call repeats the resets -- confirm this is intended.
+ */
+static void hdmi_disable(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->enabled) {
+               hdmiphy_conf_reset(hdata);
+               hdmi_conf_reset(hdata);
+       }
+}
+
+/* callbacks registered with the common exynos_drm_hdmi layer in probe */
+static struct exynos_hdmi_manager_ops manager_ops = {
+       .mode_set       = hdmi_mode_set,
+       .commit         = hdmi_commit,
+       .disable        = hdmi_disable,
+};
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void hdmi_hotplug_func(struct work_struct *work)
+{
+       struct hdmi_context *hdata =
+               container_of(work, struct hdmi_context, hotplug_work);
+       struct exynos_drm_hdmi_context *ctx =
+               (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+
+       /* notify DRM core so it re-probes connector status */
+       drm_helper_hpd_irq_event(ctx->drm_dev);
+}
+
+/*
+ * HPD interrupt handler.  Acknowledges the plug/unplug bits in
+ * HDMI_INTC_FLAG and defers the actual hotplug processing to the
+ * workqueue (hdmi_hotplug_func) when a DRM device is bound and hotplug
+ * handling is enabled.
+ */
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+       struct exynos_drm_hdmi_context *ctx = arg;
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+       u32 intc_flag;
+
+       intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
+       /* clearing flags for HPD plug/unplug */
+       if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+               DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+                       HDMI_INTC_FLAG_HPD_UNPLUG);
+       }
+       if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+               DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+                       HDMI_INTC_FLAG_HPD_PLUG);
+       }
+
+       if (ctx->drm_dev && hdata->hpd_handle)
+               queue_work(hdata->wq, &hdata->hotplug_work);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Acquire the clocks and regulators used by the HDMI block.
+ *
+ * On failure, every resource acquired so far is released here: the probe
+ * error path taken for an init failure (err_data) does not call
+ * hdmi_resources_cleanup(), so the previous code leaked the clock
+ * references and the regul_bulk allocation taken before the failing step.
+ * res is re-zeroed afterwards, so a later hdmi_resources_cleanup() call
+ * remains safe (its clk_put()s are IS_ERR_OR_NULL-guarded).
+ *
+ * Returns 0 on success, -ENODEV on any failure.
+ */
+static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+{
+       struct device *dev = hdata->dev;
+       struct hdmi_resources *res = &hdata->res;
+       static char *supply[] = {
+               "hdmi-en",
+               "vdd",
+               "vdd_osc",
+               "vdd_pll",
+       };
+       int i, ret;
+
+       DRM_DEBUG_KMS("HDMI resource init\n");
+
+       memset(res, 0, sizeof *res);
+
+       /* get clocks, power */
+       res->hdmi = clk_get(dev, "hdmi");
+       if (IS_ERR_OR_NULL(res->hdmi)) {
+               DRM_ERROR("failed to get clock 'hdmi'\n");
+               goto fail;
+       }
+       res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+               DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+               goto fail;
+       }
+       res->sclk_pixel = clk_get(dev, "sclk_pixel");
+       if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+               DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+               goto fail;
+       }
+       res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+       if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+               DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+               goto fail;
+       }
+       res->hdmiphy = clk_get(dev, "hdmiphy");
+       if (IS_ERR_OR_NULL(res->hdmiphy)) {
+               DRM_ERROR("failed to get clock 'hdmiphy'\n");
+               goto fail;
+       }
+
+       clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+       res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+               sizeof res->regul_bulk[0], GFP_KERNEL);
+       if (!res->regul_bulk) {
+               DRM_ERROR("failed to get memory for regulators\n");
+               goto fail;
+       }
+       for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+               res->regul_bulk[i].supply = supply[i];
+               res->regul_bulk[i].consumer = NULL;
+       }
+       /* regulator_bulk_get() releases its own partial refs on error */
+       ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+       if (ret) {
+               DRM_ERROR("failed to get regulators\n");
+               goto fail;
+       }
+       res->regul_count = ARRAY_SIZE(supply);
+
+       return 0;
+fail:
+       /* release everything acquired before the failing step */
+       kfree(res->regul_bulk);
+       if (!IS_ERR_OR_NULL(res->hdmiphy))
+               clk_put(res->hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+               clk_put(res->sclk_hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_pixel))
+               clk_put(res->sclk_pixel);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+               clk_put(res->sclk_hdmi);
+       if (!IS_ERR_OR_NULL(res->hdmi))
+               clk_put(res->hdmi);
+       memset(res, 0, sizeof *res);
+       DRM_ERROR("HDMI resource init - failed\n");
+       return -ENODEV;
+}
+
+/*
+ * Release everything acquired by hdmi_resources_init().  Safe to call on
+ * a zeroed or partially-initialized res: clk_put()s are guarded and
+ * regulator_bulk_free(0, NULL)/kfree(NULL) are no-ops.  Always returns 0.
+ */
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       regulator_bulk_free(res->regul_count, res->regul_bulk);
+       /* kfree is NULL-safe */
+       kfree(res->regul_bulk);
+       if (!IS_ERR_OR_NULL(res->hdmiphy))
+               clk_put(res->hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+               clk_put(res->sclk_hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_pixel))
+               clk_put(res->sclk_pixel);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+               clk_put(res->sclk_hdmi);
+       if (!IS_ERR_OR_NULL(res->hdmi))
+               clk_put(res->hdmi);
+       memset(res, 0, sizeof *res);
+
+       return 0;
+}
+
+/*
+ * Power-up sequence: regulators on, PHY clock on, bus/pixel clocks on,
+ * then reset PHY and core and re-init the core registers.
+ */
+static void hdmi_resource_poweron(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* turn HDMI power on */
+       regulator_bulk_enable(res->regul_count, res->regul_bulk);
+       /* power-on hdmi physical interface */
+       clk_enable(res->hdmiphy);
+       /* turn clocks on */
+       clk_enable(res->hdmi);
+       clk_enable(res->sclk_hdmi);
+
+       hdmiphy_conf_reset(hdata);
+       hdmi_conf_reset(hdata);
+       hdmi_conf_init(hdata);
+
+}
+
+/* Power-down sequence: mirror of hdmi_resource_poweron() in reverse. */
+static void hdmi_resource_poweroff(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* turn clocks off */
+       clk_disable(res->sclk_hdmi);
+       clk_disable(res->hdmi);
+       /* power-off hdmiphy */
+       clk_disable(res->hdmiphy);
+       /* turn HDMI power off */
+       regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+/* Runtime-PM suspend hook: power the HDMI block down. */
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       hdmi_resource_poweroff(hdata);
+
+       return 0;
+}
+
+/* Runtime-PM resume hook: power the HDMI block back up. */
+static int hdmi_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       hdmi_resource_poweron(hdata);
+
+       return 0;
+}
+
+/* runtime-PM hooks wired into hdmi_driver.driver.pm */
+static const struct dev_pm_ops hdmi_pm_ops = {
+       .runtime_suspend = hdmi_runtime_suspend,
+       .runtime_resume  = hdmi_runtime_resume,
+};
+
+/* i2c clients handed over by the DDC/hdmiphy i2c drivers at their probe */
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+/* Called by the DDC i2c driver's probe to hand us its client. */
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+       if (!ddc)
+               return;
+
+       hdmi_ddc = ddc;
+}
+EXPORT_SYMBOL(hdmi_attach_ddc_client);
+
+/* Called by the hdmiphy i2c driver's probe to hand us its client. */
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+       if (!hdmiphy)
+               return;
+
+       hdmi_hdmiphy = hdmiphy;
+}
+EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
+
+/*
+ * Platform probe: allocate the common context and the HDMI context, take
+ * clocks/regulators, map registers, register the DDC and hdmiphy i2c
+ * drivers (which hand us their clients via hdmi_attach_*_client), set up
+ * the hotplug workqueue and IRQ, register the display/manager ops and
+ * finally power the block on.  Error paths unwind in strict reverse order
+ * via the goto ladder at the end.
+ */
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct hdmi_context *hdata;
+       struct exynos_drm_hdmi_pdata *pdata;
+       struct resource *res;
+       int ret;
+
+       DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               DRM_ERROR("no platform data specified\n");
+               return -EINVAL;
+       }
+
+       drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+       if (!drm_hdmi_ctx) {
+               DRM_ERROR("failed to allocate common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+       if (!hdata) {
+               DRM_ERROR("out of memory\n");
+               kfree(drm_hdmi_ctx);
+               return -ENOMEM;
+       }
+
+       /* link the two contexts both ways */
+       drm_hdmi_ctx->ctx = (void *)hdata;
+       hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+       platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+       hdata->default_win = pdata->default_win;
+       hdata->default_timing = &pdata->timing;
+       hdata->default_bpp = pdata->bpp;
+       hdata->dev = dev;
+
+       ret = hdmi_resources_init(hdata);
+       if (ret) {
+               ret = -EINVAL;
+               goto err_data;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               DRM_ERROR("failed to find registers\n");
+               ret = -ENOENT;
+               goto err_resource;
+       }
+
+       hdata->regs_res = request_mem_region(res->start, resource_size(res),
+                                          dev_name(dev));
+       if (!hdata->regs_res) {
+               DRM_ERROR("failed to claim register region\n");
+               ret = -ENOENT;
+               goto err_resource;
+       }
+
+       hdata->regs = ioremap(res->start, resource_size(res));
+       if (!hdata->regs) {
+               DRM_ERROR("failed to map registers\n");
+               ret = -ENXIO;
+               goto err_req_region;
+       }
+
+       /* DDC i2c driver */
+       if (i2c_add_driver(&ddc_driver)) {
+               DRM_ERROR("failed to register ddc i2c driver\n");
+               ret = -ENOENT;
+               goto err_iomap;
+       }
+
+       /* filled in by ddc_driver's probe via hdmi_attach_ddc_client() */
+       hdata->ddc_port = hdmi_ddc;
+
+       /* hdmiphy i2c driver */
+       if (i2c_add_driver(&hdmiphy_driver)) {
+               DRM_ERROR("failed to register hdmiphy i2c driver\n");
+               ret = -ENOENT;
+               goto err_ddc;
+       }
+
+       hdata->hdmiphy_port = hdmi_hdmiphy;
+
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (res == NULL) {
+               DRM_ERROR("get interrupt resource failed.\n");
+               ret = -ENXIO;
+               goto err_hdmiphy;
+       }
+
+       /* create workqueue and hotplug work */
+       hdata->wq = alloc_workqueue("exynos-drm-hdmi",
+                       WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+       if (hdata->wq == NULL) {
+               DRM_ERROR("Failed to create workqueue.\n");
+               ret = -ENOMEM;
+               goto err_hdmiphy;
+       }
+       INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
+
+       /* register hpd interrupt */
+       /*
+        * NOTE(review): drm_hdmi_ctx is the dev_id cookie here; any
+        * free_irq() for this line must pass the same pointer.
+        */
+       ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
+                               drm_hdmi_ctx);
+       if (ret) {
+               DRM_ERROR("request interrupt failed.\n");
+               goto err_workqueue;
+       }
+       hdata->irq = res->start;
+
+       /* register specific callbacks to common hdmi. */
+       exynos_drm_display_ops_register(&display_ops);
+       exynos_drm_manager_ops_register(&manager_ops);
+
+       hdmi_resource_poweron(hdata);
+
+       return 0;
+
+err_workqueue:
+       destroy_workqueue(hdata->wq);
+err_hdmiphy:
+       i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+       i2c_del_driver(&ddc_driver);
+err_iomap:
+       iounmap(hdata->regs);
+err_req_region:
+       release_resource(hdata->regs_res);
+       kfree(hdata->regs_res);
+err_resource:
+       hdmi_resources_cleanup(hdata);
+err_data:
+       kfree(hdata);
+       kfree(drm_hdmi_ctx);
+       return ret;
+}
+
+/*
+ * Platform remove: tear down in reverse probe order -- power off, IRQ,
+ * hotplug work/queue, clocks/regulators, register mappings, i2c drivers.
+ */
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+       struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_resource_poweroff(hdata);
+
+       disable_irq(hdata->irq);
+       /*
+        * The dev_id cookie must match the one passed to request_irq() in
+        * hdmi_probe(), which is the common context (ctx), not hdata;
+        * passing hdata here made free_irq() fail to find the handler.
+        */
+       free_irq(hdata->irq, ctx);
+
+       cancel_work_sync(&hdata->hotplug_work);
+       destroy_workqueue(hdata->wq);
+
+       hdmi_resources_cleanup(hdata);
+
+       iounmap(hdata->regs);
+
+       release_resource(hdata->regs_res);
+       kfree(hdata->regs_res);
+
+       /* hdmiphy i2c driver */
+       i2c_del_driver(&hdmiphy_driver);
+       /* DDC i2c driver */
+       i2c_del_driver(&ddc_driver);
+
+       kfree(hdata);
+
+       return 0;
+}
+
+/* platform driver for the HDMI core block; registered by the common layer */
+struct platform_driver hdmi_driver = {
+       .probe          = hdmi_probe,
+       .remove         = __devexit_p(hdmi_remove),
+       .driver         = {
+               .name   = "exynos4-hdmi",
+               .owner  = THIS_MODULE,
+               .pm = &hdmi_pm_ops,
+       },
+};
+EXPORT_SYMBOL(hdmi_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644 (file)
index 0000000..31d6cf8
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+/* one supported display mode: lookup key plus the register/PHY settings */
+struct hdmi_conf {
+       int width;
+       int height;
+       int vrefresh;
+       bool interlace;
+       /* 32-byte HDMIPHY I2C configuration for this mode */
+       const u8 *hdmiphy_data;
+       const struct hdmi_preset_conf *conf;
+};
+
+/* clocks and regulators owned by hdmi_resources_init()/cleanup() */
+struct hdmi_resources {
+       struct clk *hdmi;
+       struct clk *sclk_hdmi;
+       struct clk *sclk_pixel;
+       struct clk *sclk_hdmiphy;
+       struct clk *hdmiphy;
+       struct regulator_bulk_data *regul_bulk;
+       int regul_count;
+};
+
+/* per-device state of the HDMI core driver */
+struct hdmi_context {
+       struct device                   *dev;
+       struct drm_device               *drm_dev;
+       struct fb_videomode             *default_timing;
+       unsigned int                    default_win;
+       unsigned int                    default_bpp;
+       /* when false, HPD interrupts do not queue hotplug work */
+       bool                            hpd_handle;
+       /* set by hdmi_commit(); checked by hdmi_disable() */
+       bool                            enabled;
+
+       struct resource                 *regs_res;
+       /** base address of HDMI registers */
+       void __iomem *regs;
+       /** HDMI hotplug interrupt */
+       unsigned int irq;
+       /** workqueue for delayed work */
+       struct workqueue_struct *wq;
+       /** hotplug handling work */
+       struct work_struct hotplug_work;
+
+       struct i2c_client *ddc_port;
+       struct i2c_client *hdmiphy_port;
+
+       /** current hdmiphy conf index */
+       int cur_conf;
+       /** other resources */
+       struct hdmi_resources res;
+
+       /* back-pointer to the owning exynos_drm_hdmi_context */
+       void *parent_ctx;
+};
+
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644 (file)
index 0000000..9fe2995
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+/* Hand the PHY's i2c client over to the HDMI core driver. */
+static int hdmiphy_probe(struct i2c_client *client,
+       const struct i2c_device_id *id)
+{
+       struct device *adapter_dev = &client->adapter->dev;
+
+       hdmi_attach_hdmiphy_client(client);
+
+       dev_info(adapter_dev, "attached s5p_hdmiphy "
+               "into i2c adapter successfully\n");
+
+       return 0;
+}
+
+/* Nothing to tear down; just log the detach. */
+static int hdmiphy_remove(struct i2c_client *client)
+{
+       dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+               "from i2c adapter successfully\n");
+
+       return 0;
+}
+
+/* i2c device ids matched by this driver */
+static const struct i2c_device_id hdmiphy_id[] = {
+       { "s5p_hdmiphy", 0 },
+       { },
+};
+
+/* registered/unregistered by the HDMI core driver in its probe/remove */
+struct i2c_driver hdmiphy_driver = {
+       .driver = {
+               .name   = "s5p-hdmiphy",
+               .owner  = THIS_MODULE,
+       },
+       .id_table = hdmiphy_id,
+       .probe          = hdmiphy_probe,
+       .remove         = __devexit_p(hdmiphy_remove),
+       .command                = NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644 (file)
index 0000000..ac24cff
--- /dev/null
@@ -0,0 +1,1070 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+/*
+ * Video-processor polyphase filter coefficient tables, programmed into the
+ * VP registers by vp_default_filter() via vp_filter_set().  The names
+ * indicate 8-tap horizontal luma, 4-tap vertical luma and 4-tap horizontal
+ * chroma filters; negative coefficients are stored two's-complement in u8.
+ */
+static const u8 filter_y_horiz_tap8[] = {
+       0,      -1,     -1,     -1,     -1,     -1,     -1,     -1,
+       -1,     -1,     -1,     -1,     -1,     0,      0,      0,
+       0,      2,      4,      5,      6,      6,      6,      6,
+       6,      5,      5,      4,      3,      2,      1,      1,
+       0,      -6,     -12,    -16,    -18,    -20,    -21,    -20,
+       -20,    -18,    -16,    -13,    -10,    -8,     -5,     -2,
+       127,    126,    125,    121,    114,    107,    99,     89,
+       79,     68,     57,     46,     35,     25,     16,     8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+       0,      -3,     -6,     -8,     -8,     -8,     -8,     -7,
+       -6,     -5,     -4,     -3,     -2,     -1,     -1,     0,
+       127,    126,    124,    118,    111,    102,    92,     81,
+       70,     59,     48,     37,     27,     19,     11,     5,
+       0,      5,      11,     19,     27,     37,     48,     59,
+       70,     81,     92,     102,    111,    118,    124,    126,
+       0,      0,      -1,     -1,     -2,     -3,     -4,     -5,
+       -6,     -7,     -8,     -8,     -8,     -8,     -6,     -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+       0,      -3,     -6,     -8,     -8,     -8,     -8,     -7,
+       -6,     -5,     -4,     -3,     -2,     -1,     -1,     0,
+       127,    126,    124,    118,    111,    102,    92,     81,
+       70,     59,     48,     37,     27,     19,     11,     5,
+};
+
+/* read a video-processor register */
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+       return readl(res->vp_regs + reg_id);
+}
+
+/* write a video-processor register */
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+                                u32 val)
+{
+       writel(val, res->vp_regs + reg_id);
+}
+
+/* read-modify-write: update only the bits of @mask with bits from @val */
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+                                u32 val, u32 mask)
+{
+       u32 old = vp_reg_read(res, reg_id);
+
+       val = (val & mask) | (old & ~mask);
+       writel(val, res->vp_regs + reg_id);
+}
+
+/* read a mixer register */
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+       return readl(res->mixer_regs + reg_id);
+}
+
+/* write a mixer register */
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+                                u32 val)
+{
+       writel(val, res->mixer_regs + reg_id);
+}
+
+/* read-modify-write: update only the bits of @mask with bits from @val */
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+                                u32 reg_id, u32 val, u32 mask)
+{
+       u32 old = mixer_reg_read(res, reg_id);
+
+       val = (val & mask) | (old & ~mask);
+       writel(val, res->mixer_regs + reg_id);
+}
+
+/* dump the mixer register file to the DRM debug log */
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+       DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+               (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+       DUMPREG(MXR_STATUS);
+       DUMPREG(MXR_CFG);
+       DUMPREG(MXR_INT_EN);
+       DUMPREG(MXR_INT_STATUS);
+
+       DUMPREG(MXR_LAYER_CFG);
+       DUMPREG(MXR_VIDEO_CFG);
+
+       DUMPREG(MXR_GRAPHIC0_CFG);
+       DUMPREG(MXR_GRAPHIC0_BASE);
+       DUMPREG(MXR_GRAPHIC0_SPAN);
+       DUMPREG(MXR_GRAPHIC0_WH);
+       DUMPREG(MXR_GRAPHIC0_SXY);
+       DUMPREG(MXR_GRAPHIC0_DXY);
+
+       DUMPREG(MXR_GRAPHIC1_CFG);
+       DUMPREG(MXR_GRAPHIC1_BASE);
+       DUMPREG(MXR_GRAPHIC1_SPAN);
+       DUMPREG(MXR_GRAPHIC1_WH);
+       DUMPREG(MXR_GRAPHIC1_SXY);
+       DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+/* dump the video-processor register file to the DRM debug log */
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+       DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+               (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+       DUMPREG(VP_ENABLE);
+       DUMPREG(VP_SRESET);
+       DUMPREG(VP_SHADOW_UPDATE);
+       DUMPREG(VP_FIELD_ID);
+       DUMPREG(VP_MODE);
+       DUMPREG(VP_IMG_SIZE_Y);
+       DUMPREG(VP_IMG_SIZE_C);
+       DUMPREG(VP_PER_RATE_CTRL);
+       DUMPREG(VP_TOP_Y_PTR);
+       DUMPREG(VP_BOT_Y_PTR);
+       DUMPREG(VP_TOP_C_PTR);
+       DUMPREG(VP_BOT_C_PTR);
+       DUMPREG(VP_ENDIAN_MODE);
+       DUMPREG(VP_SRC_H_POSITION);
+       DUMPREG(VP_SRC_V_POSITION);
+       DUMPREG(VP_SRC_WIDTH);
+       DUMPREG(VP_SRC_HEIGHT);
+       DUMPREG(VP_DST_H_POSITION);
+       DUMPREG(VP_DST_V_POSITION);
+       DUMPREG(VP_DST_WIDTH);
+       DUMPREG(VP_DST_HEIGHT);
+       DUMPREG(VP_H_RATIO);
+       DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+/*
+ * Load a filter coefficient table into consecutive VP registers, packing
+ * four bytes (big-endian) per 32-bit register write.
+ */
+static inline void vp_filter_set(struct mixer_resources *res,
+               int reg_id, const u8 *data, unsigned int size)
+{
+       unsigned int i;
+
+       /* assure 4-byte align */
+       BUG_ON(size & 3);
+       for (i = 0; i < size; i += 4) {
+               u32 val = (data[i] << 24) | (data[i + 1] << 16) |
+                       (data[i + 2] << 8) | data[i + 3];
+               vp_reg_write(res, reg_id + i, val);
+       }
+}
+
+/* program the three default polyphase filter coefficient tables */
+static void vp_default_filter(struct mixer_resources *res)
+{
+       vp_filter_set(res, VP_POLY8_Y0_LL,
+               filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+       vp_filter_set(res, VP_POLY4_Y0_LL,
+               filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+       vp_filter_set(res, VP_POLY4_C0_LL,
+               filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+/*
+ * Enable/disable vsync-synchronized shadow-register updates on both the
+ * mixer (MXR_STATUS_SYNC_ENABLE) and the video processor.
+ */
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       /* block update on vsync */
+       mixer_reg_writemask(res, MXR_STATUS, enable ?
+                       MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+       vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+                       VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+/*
+ * Configure the mixer scan mode (interlace vs. progressive and SD/HD
+ * resolution class) from the active-line count; unknown heights fall back
+ * to 720p HD settings.
+ */
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val;
+
+       /* choosing between interlace and progressive mode */
+       val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+                               MXR_CFG_SCAN_PROGRASSIVE);
+
+       /* choosing between proper HD and SD mode */
+       if (height == 480)
+               val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+       else if (height == 576)
+               val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+       else if (height == 720)
+               val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+       else if (height == 1080)
+               val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+       else
+               val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+       mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+/*
+ * Select the RGB format/range for the mixer from the active-line count.
+ * SD heights (480/576) use full-range 601; everything else (720, 1080 and
+ * unknown heights) uses limited-range 709 with the same color-matrix
+ * coefficients.  The original code spelled out five branches of which the
+ * three HD/default ones and the two SD ones were byte-identical; collapsed
+ * here with identical behavior.
+ */
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val;
+
+       if (height == 480 || height == 576) {
+               /* SD: no color-matrix update */
+               val = MXR_CFG_RGB601_0_255;
+       } else {
+               /* HD (720/1080) and fallback for other heights */
+               val = MXR_CFG_RGB709_16_235;
+               mixer_reg_write(res, MXR_CM_COEFF_Y,
+                               (1 << 30) | (94 << 20) | (314 << 10) |
+                               (32 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CB,
+                               (972 << 20) | (851 << 10) | (225 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CR,
+                               (225 << 20) | (820 << 10) | (1004 << 0));
+       }
+
+       mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+/*
+ * Enable or disable one mixer layer: windows 0/1 are the graphic
+ * layers, window 2 is the video processor layer.
+ */
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 bits = enable ? ~0 : 0;
+
+       if (win == 0) {
+               mixer_reg_writemask(res, MXR_CFG, bits, MXR_CFG_GRP0_ENABLE);
+       } else if (win == 1) {
+               mixer_reg_writemask(res, MXR_CFG, bits, MXR_CFG_GRP1_ENABLE);
+       } else if (win == 2) {
+               /* the VP layer has its own enable bit plus a mixer bit */
+               vp_reg_writemask(res, VP_ENABLE, bits, VP_ENABLE_ON);
+               mixer_reg_writemask(res, MXR_CFG, bits, MXR_CFG_VP_ENABLE);
+       }
+}
+
+/* Kick the mixer: set the RUN bit and dump the registers for debug. */
+static void mixer_run(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+       mixer_regs_dump(ctx);
+}
+
+/*
+ * Program the Video Processor with the NV12M/NV12MT buffer cached for
+ * overlay window @win and enable the VP layer.  Registers are updated
+ * under reg_slock with vsync auto-update suspended.
+ */
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       struct hdmi_win_data *win_data;
+       unsigned int full_width, full_height, width, height;
+       unsigned int x_ratio, y_ratio;
+       unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+       unsigned int mode_width, mode_height;
+       unsigned int buf_num = 2;
+       dma_addr_t luma_addr[2], chroma_addr[2];
+       bool tiled_mode = false;
+       bool crcb_mode = false;
+       u32 val;
+
+       win_data = &ctx->win_data[win];
+
+       switch (win_data->pixel_format) {
+       case DRM_FORMAT_NV12MT:
+               tiled_mode = true;
+               /* fallthrough - NV12MT uses the same two-plane layout */
+       case DRM_FORMAT_NV12M:
+               crcb_mode = false;
+               buf_num = 2;
+               break;
+       /* TODO: single buffer format NV12, NV21 */
+       default:
+               /* ignore pixel format at disable time; buf_num and
+                * crcb_mode keep their NV12M defaults, so the code below
+                * no longer reads uninitialized values on this path
+                */
+               if (!win_data->dma_addr)
+                       break;
+
+               DRM_ERROR("pixel format for vp is wrong [%d].\n",
+                               win_data->pixel_format);
+               return;
+       }
+
+       full_width = win_data->fb_width;
+       full_height = win_data->fb_height;
+       width = win_data->crtc_width;
+       height = win_data->crtc_height;
+       mode_width = win_data->mode_width;
+       mode_height = win_data->mode_height;
+
+       /* scaling feature: (src << 16) / dst - src and dst are currently
+        * the same crtc size, so this always programs a 1:1 ratio
+        */
+       x_ratio = (width << 16) / width;
+       y_ratio = (height << 16) / height;
+
+       src_x_offset = win_data->fb_x;
+       src_y_offset = win_data->fb_y;
+       dst_x_offset = win_data->crtc_x;
+       dst_y_offset = win_data->crtc_y;
+
+       if (buf_num == 2) {
+               /* two-plane format: separate luma and chroma buffers */
+               luma_addr[0] = win_data->dma_addr;
+               chroma_addr[0] = win_data->chroma_dma_addr;
+       } else {
+               /* single buffer: chroma plane follows the luma plane */
+               luma_addr[0] = win_data->dma_addr;
+               chroma_addr[0] = win_data->dma_addr
+                       + (full_width * full_height);
+       }
+
+       if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+               ctx->interlace = true;
+               if (tiled_mode) {
+                       luma_addr[1] = luma_addr[0] + 0x40;
+                       chroma_addr[1] = chroma_addr[0] + 0x40;
+               } else {
+                       /* bottom field starts one line into the buffer */
+                       luma_addr[1] = luma_addr[0] + full_width;
+                       chroma_addr[1] = chroma_addr[0] + full_width;
+               }
+       } else {
+               ctx->interlace = false;
+               luma_addr[1] = 0;
+               chroma_addr[1] = 0;
+       }
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       /* interlace or progressive scan mode */
+       val = (ctx->interlace ? ~0 : 0);
+       vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+       /* setup format */
+       val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+       val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+       vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+       /* setting size of input image */
+       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
+               VP_IMG_VSIZE(full_height));
+       /* chroma height has to reduced by 2 to avoid chroma distorions */
+       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
+               VP_IMG_VSIZE(full_height / 2));
+
+       vp_reg_write(res, VP_SRC_WIDTH, width);
+       vp_reg_write(res, VP_SRC_HEIGHT, height);
+       vp_reg_write(res, VP_SRC_H_POSITION,
+                       VP_SRC_H_POSITION_VAL(src_x_offset));
+       vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+
+       vp_reg_write(res, VP_DST_WIDTH, width);
+       vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+       if (ctx->interlace) {
+               /* interlaced output works on half-height fields */
+               vp_reg_write(res, VP_DST_HEIGHT, height / 2);
+               vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+       } else {
+               vp_reg_write(res, VP_DST_HEIGHT, height);
+               vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+       }
+
+       vp_reg_write(res, VP_H_RATIO, x_ratio);
+       vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+       vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+       /* set buffer address to vp */
+       vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+       vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+       vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+       vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+       mixer_cfg_scan(ctx, mode_height);
+       mixer_cfg_rgb_fmt(ctx, mode_height);
+       mixer_cfg_layer(ctx, win, true);
+       mixer_run(ctx);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       vp_regs_dump(ctx);
+}
+
+/*
+ * Program mixer graphic layer @win with the window's framebuffer and
+ * enable it.  Registers are updated under reg_slock with vsync
+ * auto-update suspended.
+ */
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       struct hdmi_win_data *win_data;
+       unsigned int full_width, width, height;
+       unsigned int x_ratio, y_ratio;
+       unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+       unsigned int mode_width, mode_height;
+       dma_addr_t dma_addr;
+       unsigned int fmt;
+       u32 val;
+
+       /* pixel-format codes of the MXR_GRP_CFG format field; an enum is
+        * block-scoped, unlike the former #defines which leaked into the
+        * rest of the translation unit
+        */
+       enum {
+               MXR_GRP_FMT_RGB565 = 4,
+               MXR_GRP_FMT_ARGB1555 = 5,
+               MXR_GRP_FMT_ARGB4444 = 6,
+               MXR_GRP_FMT_ARGB8888 = 7,
+       };
+
+       win_data = &ctx->win_data[win];
+
+       switch (win_data->bpp) {
+       case 16:
+               fmt = MXR_GRP_FMT_ARGB4444;
+               break;
+       case 32:
+               fmt = MXR_GRP_FMT_ARGB8888;
+               break;
+       default:
+               /* fall back to ARGB8888 for unexpected depths */
+               fmt = MXR_GRP_FMT_ARGB8888;
+               break;
+       }
+
+       dma_addr = win_data->dma_addr;
+       full_width = win_data->fb_width;
+       width = win_data->crtc_width;
+       height = win_data->crtc_height;
+       mode_width = win_data->mode_width;
+       mode_height = win_data->mode_height;
+
+       /* 2x scaling feature; 0 disables scaling */
+       x_ratio = 0;
+       y_ratio = 0;
+
+       src_x_offset = win_data->fb_x;
+       src_y_offset = win_data->fb_y;
+       dst_x_offset = win_data->crtc_x;
+       dst_y_offset = win_data->crtc_y;
+
+       /* fold the source offset into the dma base address so the
+        * hardware offset registers can be programmed with zero
+        */
+       dma_addr = dma_addr
+               + (src_x_offset * win_data->bpp >> 3)
+               + (src_y_offset * full_width * win_data->bpp >> 3);
+       src_x_offset = 0;
+       src_y_offset = 0;
+
+       if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+               ctx->interlace = true;
+       else
+               ctx->interlace = false;
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       /* setup format */
+       mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+               MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+       /* setup geometry */
+       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+
+       val  = MXR_GRP_WH_WIDTH(width);
+       val |= MXR_GRP_WH_HEIGHT(height);
+       val |= MXR_GRP_WH_H_SCALE(x_ratio);
+       val |= MXR_GRP_WH_V_SCALE(y_ratio);
+       mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+       /* setup offsets in source image */
+       val  = MXR_GRP_SXY_SX(src_x_offset);
+       val |= MXR_GRP_SXY_SY(src_y_offset);
+       mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+       /* setup offsets in display image */
+       val  = MXR_GRP_DXY_DX(dst_x_offset);
+       val |= MXR_GRP_DXY_DY(dst_y_offset);
+       mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+       /* set buffer address to mixer */
+       mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+       mixer_cfg_scan(ctx, mode_height);
+       mixer_cfg_rgb_fmt(ctx, mode_height);
+       mixer_cfg_layer(ctx, win, true);
+       mixer_run(ctx);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+/*
+ * Soft-reset the Video Processor and busy-wait (up to ~1s) for the
+ * hardware to clear the in-progress bit.  The previous version
+ * initialized 'tries' twice (at declaration and in the for loop).
+ */
+static void vp_win_reset(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       int tries;
+
+       vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+       for (tries = 100; tries; --tries) {
+               /* reset is done once VP_SRESET_PROCESSING reads back 0 */
+               if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+                       break;
+               mdelay(10);
+       }
+       WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+/* Remember the crtc pipe and unmask the mixer VSYNC interrupt. */
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+       struct mixer_context *mctx = ctx;
+       struct mixer_resources *res = &mctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mctx->pipe = pipe;
+
+       /* unmask the vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+                       MXR_INT_EN_VSYNC);
+
+       return 0;
+}
+
+/* Mask the mixer VSYNC interrupt again. */
+static void mixer_disable_vblank(void *ctx)
+{
+       struct mixer_context *mctx = ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* mask the vsync interrupt */
+       mixer_reg_writemask(&mctx->mixer_res, MXR_INT_EN, 0,
+                       MXR_INT_EN_VSYNC);
+}
+
+/*
+ * Cache the overlay parameters for window @overlay->zpos so a later
+ * win_commit can program the hardware.  No registers are touched here.
+ */
+static void mixer_win_mode_set(void *ctx,
+                             struct exynos_drm_overlay *overlay)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct hdmi_win_data *win_data;
+       int win;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!overlay) {
+               DRM_ERROR("overlay is NULL\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+                                overlay->fb_width, overlay->fb_height,
+                                overlay->fb_x, overlay->fb_y,
+                                overlay->crtc_width, overlay->crtc_height,
+                                overlay->crtc_x, overlay->crtc_y);
+
+       win = overlay->zpos;
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       /* valid indices are 0 .. HDMI_OVERLAY_NUMBER - 1; the previous
+        * '>' test let win == HDMI_OVERLAY_NUMBER overrun win_data[]
+        */
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
+       win_data = &mixer_ctx->win_data[win];
+
+       win_data->dma_addr = overlay->dma_addr[0];
+       win_data->vaddr = overlay->vaddr[0];
+       win_data->chroma_dma_addr = overlay->dma_addr[1];
+       win_data->chroma_vaddr = overlay->vaddr[1];
+       win_data->pixel_format = overlay->pixel_format;
+       win_data->bpp = overlay->bpp;
+
+       win_data->crtc_x = overlay->crtc_x;
+       win_data->crtc_y = overlay->crtc_y;
+       win_data->crtc_width = overlay->crtc_width;
+       win_data->crtc_height = overlay->crtc_height;
+
+       win_data->fb_x = overlay->fb_x;
+       win_data->fb_y = overlay->fb_y;
+       win_data->fb_width = overlay->fb_width;
+       win_data->fb_height = overlay->fb_height;
+
+       win_data->mode_width = overlay->mode_width;
+       win_data->mode_height = overlay->mode_height;
+
+       win_data->scan_flags = overlay->scan_flag;
+}
+
+/*
+ * Program the hardware for window @zpos using state cached by
+ * mixer_win_mode_set: windows 0/1 are graphic layers, window 2 is the
+ * video processor layer.
+ */
+static void mixer_win_commit(void *ctx, int zpos)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       int win = zpos;
+
+       DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       /* valid indices are 0 .. HDMI_OVERLAY_NUMBER - 1; the previous
+        * '>' test accepted win == HDMI_OVERLAY_NUMBER (out of bounds)
+        */
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
+       if (win > 1)
+               vp_video_buffer(mixer_ctx, win);
+       else
+               mixer_graph_buffer(mixer_ctx, win);
+}
+
+/*
+ * Disable the mixer layer for window @zpos.  The cached window state is
+ * kept so the layer can be re-committed later.
+ */
+static void mixer_win_disable(void *ctx, int zpos)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct mixer_resources *res = &mixer_ctx->mixer_res;
+       unsigned long flags;
+       int win = zpos;
+
+       DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       /* valid indices are 0 .. HDMI_OVERLAY_NUMBER - 1; the previous
+        * '>' test accepted win == HDMI_OVERLAY_NUMBER (out of bounds)
+        */
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(mixer_ctx, false);
+
+       mixer_cfg_layer(mixer_ctx, win, false);
+
+       mixer_vsync_set_update(mixer_ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+/* mixer overlay callbacks registered with the common Exynos HDMI layer */
+static struct exynos_hdmi_overlay_ops overlay_ops = {
+       .enable_vblank          = mixer_enable_vblank,
+       .disable_vblank         = mixer_disable_vblank,
+       .win_mode_set           = mixer_win_mode_set,
+       .win_commit             = mixer_win_commit,
+       .win_disable            = mixer_win_disable,
+};
+
+/* for pageflip event */
+/*
+ * Complete pending page-flip events for @crtc: stamp each matching
+ * event with the current time, move it to its owner's event list and
+ * wake any waiters.  Called from the vsync interrupt path.
+ */
+static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+       struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       bool is_checked = false;
+
+       spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+       list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+                       base.link) {
+               /* if event's pipe isn't same as crtc then ignore it. */
+               if (crtc != e->pipe)
+                       continue;
+
+               is_checked = true;
+               do_gettimeofday(&now);
+               e->event.sequence = 0;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       }
+
+       /* NOTE(review): only one drm_vblank_put() is issued even if
+        * several events were completed - presumably at most one flip is
+        * pending per crtc at a time; confirm against the flip path
+        */
+       if (is_checked)
+               drm_vblank_put(drm_dev, crtc);
+
+       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+/*
+ * Mixer interrupt handler: on VSYNC, run vblank and page-flip
+ * completion, then acknowledge the interrupt.
+ */
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+       struct mixer_context *ctx =
+                       (struct mixer_context *)drm_hdmi_ctx->ctx;
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val, val_base;
+
+       spin_lock(&res->reg_slock);
+
+       /* read interrupt status for handling and clearing flags for VSYNC */
+       val = mixer_reg_read(res, MXR_INT_STATUS);
+
+       /* handling VSYNC */
+       if (val & MXR_INT_STATUS_VSYNC) {
+               /* interlace scan need to check shadow register */
+               if (ctx->interlace) {
+                       /* defer vblank/pageflip completion until the
+                        * shadow base registers have latched the new
+                        * buffer addresses for both graphic layers
+                        */
+                       val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+                       if (ctx->win_data[0].dma_addr != val_base)
+                               goto out;
+
+                       val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+                       if (ctx->win_data[1].dma_addr != val_base)
+                               goto out;
+               }
+
+               drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+               mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+       }
+
+out:
+       /* clear interrupts */
+       /* NOTE(review): MXR_INT_EN_VSYNC is tested against a value read
+        * from MXR_INT_STATUS; this assumes the EN and STATUS registers
+        * share the same vsync bit position - confirm with the datasheet
+        */
+       if (~val & MXR_INT_EN_VSYNC) {
+               /* vsync interrupt use different bit for read and clear */
+               val &= ~MXR_INT_EN_VSYNC;
+               val |= MXR_INT_CLEAR_VSYNC;
+       }
+       mixer_reg_write(res, MXR_INT_STATUS, val);
+
+       spin_unlock(&res->reg_slock);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Reset the mixer into a known configuration: HDMI output, RGB888
+ * format, default layer priorities and blending, and all layers
+ * disabled.  The whole sequence runs under reg_slock with vsync
+ * auto-update suspended so it is applied atomically.
+ */
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       u32 val; /* value stored to register */
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+       /* set output in RGB888 mode */
+       mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+       /* 16 beat burst in DMA */
+       mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+               MXR_STATUS_BURST_MASK);
+
+       /* setting default layer priority: layer1 > video > layer0
+        * because typical usage scenario would be
+        * layer0 - framebuffer
+        * video - video overlay
+        * layer1 - OSD
+        */
+       val  = MXR_LAYER_CFG_GRP0_VAL(1);
+       val |= MXR_LAYER_CFG_VP_VAL(2);
+       val |= MXR_LAYER_CFG_GRP1_VAL(3);
+       mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+       /* setting background color */
+       /* 0x008080: presumably neutral-chroma black - confirm the
+        * BG_COLOR register format in the datasheet
+        */
+       mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+       mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+       mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+       /* setting graphical layers */
+
+       val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+       val |= MXR_GRP_CFG_WIN_BLEND_EN;
+       val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+       /* the same configuration for both layers */
+       mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+       /* layer 1 additionally blends per pixel with premultiplied alpha */
+       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+       mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+       /* configuration of Video Processor Registers */
+       vp_win_reset(ctx);
+       vp_default_filter(res);
+
+       /* disable all layers */
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+/* Ungate the mixer clocks and bring the hardware to a known state. */
+static void mixer_resource_poweron(struct mixer_context *ctx)
+{
+       struct mixer_resources *r = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       clk_enable(r->mixer);
+       clk_enable(r->vp);
+       clk_enable(r->sclk_mixer);
+
+       /* registers are lost while powered down; reprogram them */
+       mixer_win_reset(ctx);
+}
+
+/* Gate the mixer clocks again (counterpart of mixer_resource_poweron). */
+static void mixer_resource_poweroff(struct mixer_context *ctx)
+{
+       struct mixer_resources *r = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       clk_disable(r->mixer);
+       clk_disable(r->vp);
+       clk_disable(r->sclk_mixer);
+}
+
+/* Runtime PM resume callback: power the mixer block back on. */
+static int mixer_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *hdmi_ctx = get_mixer_context(dev);
+
+       DRM_DEBUG_KMS("resume - start\n");
+
+       mixer_resource_poweron((struct mixer_context *)hdmi_ctx->ctx);
+
+       return 0;
+}
+
+/* Runtime PM suspend callback: gate the mixer clocks. */
+static int mixer_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *hdmi_ctx = get_mixer_context(dev);
+
+       DRM_DEBUG_KMS("suspend - start\n");
+
+       mixer_resource_poweroff((struct mixer_context *)hdmi_ctx->ctx);
+
+       return 0;
+}
+
+/* runtime PM hooks: gate clocks on suspend, reprogram on resume */
+static const struct dev_pm_ops mixer_pm_ops = {
+       .runtime_suspend = mixer_runtime_suspend,
+       .runtime_resume  = mixer_runtime_resume,
+};
+
+/*
+ * Acquire all mixer hardware resources: five clocks, the "mxr" and
+ * "vp" register windows, and the interrupt.  On failure, everything
+ * acquired so far is released - the error labels fall through
+ * intentionally, and the clk_put calls are guarded because the
+ * clk pointers start out NULL (ctx is kzalloc'd by the caller).
+ */
+static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+                                struct platform_device *pdev)
+{
+       struct mixer_context *mixer_ctx =
+                       (struct mixer_context *)ctx->ctx;
+       struct device *dev = &pdev->dev;
+       struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+       struct resource *res;
+       int ret;
+
+       mixer_res->dev = dev;
+       spin_lock_init(&mixer_res->reg_slock);
+
+       mixer_res->mixer = clk_get(dev, "mixer");
+       if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+               dev_err(dev, "failed to get clock 'mixer'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->vp = clk_get(dev, "vp");
+       if (IS_ERR_OR_NULL(mixer_res->vp)) {
+               dev_err(dev, "failed to get clock 'vp'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+               dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+               dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+               dev_err(dev, "failed to get clock 'sclk_dac'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+       if (res == NULL) {
+               dev_err(dev, "get memory resource failed.\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       /* run the mixer special clock off the HDMI clock */
+       clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+       mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+       if (mixer_res->mixer_regs == NULL) {
+               dev_err(dev, "register mapping failed.\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+       if (res == NULL) {
+               dev_err(dev, "get memory resource failed.\n");
+               ret = -ENXIO;
+               goto fail_mixer_regs;
+       }
+
+       mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+       if (mixer_res->vp_regs == NULL) {
+               dev_err(dev, "register mapping failed.\n");
+               ret = -ENXIO;
+               goto fail_mixer_regs;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+       if (res == NULL) {
+               dev_err(dev, "get interrupt resource failed.\n");
+               ret = -ENXIO;
+               goto fail_vp_regs;
+       }
+
+       /* @ctx (the drm_hdmi context) is the irq dev_id; free_irq must
+        * later be called with this same pointer
+        */
+       ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+       if (ret) {
+               dev_err(dev, "request interrupt failed.\n");
+               goto fail_vp_regs;
+       }
+       mixer_res->irq = res->start;
+
+       return 0;
+
+fail_vp_regs:
+       iounmap(mixer_res->vp_regs);
+
+fail_mixer_regs:
+       iounmap(mixer_res->mixer_regs);
+
+fail:
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+               clk_put(mixer_res->sclk_dac);
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+               clk_put(mixer_res->sclk_hdmi);
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+               clk_put(mixer_res->sclk_mixer);
+       if (!IS_ERR_OR_NULL(mixer_res->vp))
+               clk_put(mixer_res->vp);
+       if (!IS_ERR_OR_NULL(mixer_res->mixer))
+               clk_put(mixer_res->mixer);
+       mixer_res->dev = NULL;
+       return ret;
+}
+
+/*
+ * Release the irq and register mappings acquired by
+ * mixer_resources_init().
+ * NOTE(review): free_irq() is passed the mixer_context here, but
+ * request_irq() registered the exynos_drm_hdmi_context as dev_id -
+ * the pointers differ, so free_irq will not match the irq action;
+ * confirm and use the same dev_id on both sides.
+ */
+static void mixer_resources_cleanup(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       disable_irq(res->irq);
+       free_irq(res->irq, ctx);
+
+       iounmap(res->vp_regs);
+       iounmap(res->mixer_regs);
+}
+
+/*
+ * Probe: allocate the driver contexts, acquire hardware resources,
+ * register the overlay callbacks with the common HDMI layer and power
+ * the mixer up.
+ */
+static int __devinit mixer_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct mixer_context *ctx;
+       int ret;
+
+       dev_info(dev, "probe start\n");
+
+       drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+       if (!drm_hdmi_ctx) {
+               DRM_ERROR("failed to allocate common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               DRM_ERROR("failed to alloc mixer context.\n");
+               kfree(drm_hdmi_ctx);
+               return -ENOMEM;
+       }
+
+       drm_hdmi_ctx->ctx = (void *)ctx;
+
+       platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+       /* acquire resources: regs, irqs, clocks */
+       ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+       if (ret)
+               goto fail;
+
+       /* register specific callback point to common hdmi. */
+       exynos_drm_overlay_ops_register(&overlay_ops);
+
+       mixer_resource_poweron(ctx);
+
+       return 0;
+
+fail:
+       dev_info(dev, "probe failed\n");
+       /* previously both contexts leaked on this path */
+       kfree(ctx);
+       kfree(drm_hdmi_ctx);
+       return ret;
+}
+
+/*
+ * Remove: power the mixer down, release its resources and free the
+ * contexts allocated in mixer_probe() (previously they leaked).
+ */
+static int mixer_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx =
+                                       platform_get_drvdata(pdev);
+       struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+
+       mixer_resource_poweroff(ctx);
+       mixer_resources_cleanup(ctx);
+
+       kfree(ctx);
+       kfree(drm_hdmi_ctx);
+
+       /* fixed typo ("sucessful"); logged after the work is done */
+       dev_info(dev, "remove successful\n");
+
+       return 0;
+}
+
+/* platform driver glue, bound to the "s5p-mixer" platform device */
+struct platform_driver mixer_driver = {
+       .driver = {
+               .name = "s5p-mixer",
+               .owner = THIS_MODULE,
+               .pm = &mixer_pm_ops,
+       },
+       .probe = mixer_probe,
+       .remove = __devexit_p(mixer_remove),
+};
+EXPORT_SYMBOL(mixer_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644 (file)
index 0000000..cebacfe
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+#define HDMI_OVERLAY_NUMBER    3
+
+/* snapshot of one overlay window's state, filled by mixer_win_mode_set() */
+struct hdmi_win_data {
+       dma_addr_t              dma_addr;        /* luma/primary plane DMA address */
+       void __iomem            *vaddr;          /* luma/primary plane virtual address */
+       dma_addr_t              chroma_dma_addr; /* chroma plane DMA address */
+       void __iomem            *chroma_vaddr;   /* chroma plane virtual address */
+       uint32_t                pixel_format;    /* DRM_FORMAT_* fourcc */
+       unsigned int            bpp;
+       unsigned int            crtc_x;          /* destination rect on the crtc */
+       unsigned int            crtc_y;
+       unsigned int            crtc_width;
+       unsigned int            crtc_height;
+       unsigned int            fb_x;            /* source rect in the framebuffer */
+       unsigned int            fb_y;
+       unsigned int            fb_width;
+       unsigned int            fb_height;
+       unsigned int            mode_width;      /* full display mode size */
+       unsigned int            mode_height;
+       unsigned int            scan_flags;      /* DRM_MODE_FLAG_* scan flags */
+};
+
+/* hardware resources (clocks, register windows, irq) owned by the mixer */
+struct mixer_resources {
+       struct device *dev;
+       /** interrupt index */
+       int irq;
+       /** pointer to Mixer registers */
+       void __iomem *mixer_regs;
+       /** pointer to Video Processor registers */
+       void __iomem *vp_regs;
+       /** spinlock for protection of registers */
+       spinlock_t reg_slock;
+       /** other resources */
+       struct clk *mixer;
+       struct clk *vp;
+       struct clk *sclk_mixer;
+       struct clk *sclk_hdmi;
+       struct clk *sclk_dac;
+};
+
+/* top-level driver state for the mixer device */
+struct mixer_context {
+       /** window used when an overlay requests DEFAULT_ZPOS */
+       unsigned int                    default_win;
+       /** default timing (presumably for initial mode setup - confirm) */
+       struct fb_videomode             *default_timing;
+       /** default bits per pixel */
+       unsigned int                    default_bpp;
+
+       /** mixer interrupt */
+       unsigned int irq;
+       /** current crtc pipe for vblank */
+       int pipe;
+       /** interlace scan mode */
+       bool interlace;
+       /** vp enabled status */
+       bool vp_enabled;
+
+       /** mixer and vp resources */
+       struct mixer_resources mixer_res;
+
+       /** overlay window data */
+       struct hdmi_win_data            win_data[HDMI_OVERLAY_NUMBER];
+};
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644 (file)
index 0000000..72e6b52
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x)              ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x)              ((x) + 0x00010000)
+#define HDMI_TG_BASE(x)                        ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON                  HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG                 HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS                        HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT                        HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL                  HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU                   HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT               HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0                     HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1                     HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2                     HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS                        HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS                        HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN                 HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD                       HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL                  HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0             HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1             HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2             HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0                 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1                 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0                 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1                 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2                 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0                        HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1                        HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2                        HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL                 HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE              HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0               HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1               HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2               HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0              HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1              HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2              HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0            HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1            HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2            HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0            HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1            HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2            HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0            HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1            HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2            HDMI_CORE_BASE(0x0158)
+#define HDMI_ACR_CON                   HDMI_CORE_BASE(0x0180)
+#define HDMI_AVI_CON                   HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL                        HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN         HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN                   HDMI_CORE_BASE(0x05C8)
+#define HDMI_AUI_CON                   HDMI_CORE_BASE(0x0360)
+#define HDMI_SPD_CON                   HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD                    HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L                        HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H                        HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L              HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H              HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L              HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H              HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L                        HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H                        HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L                        HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H                        HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L               HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H               HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L              HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H              HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L              HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H              HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L            HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H            HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L             HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H             HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L       HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H       HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L       HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H       HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L       HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H       HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L       HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H       HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL            (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG          (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG                (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG                (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG      (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT             (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT            (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN               (1 << 5)
+#define HDMI_EN                                (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY          (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN              (1 << 1)
+#define HDMI_MODE_DVI_EN               (1 << 0)
+#define HDMI_MODE_MASK                 (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN                     (1 << 0)
+#define HDMI_FIELD_EN                  (1 << 1)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644 (file)
index 0000000..fd2f4d1
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS                     0x0000
+#define MXR_CFG                                0x0004
+#define MXR_INT_EN                     0x0008
+#define MXR_INT_STATUS                 0x000C
+#define MXR_LAYER_CFG                  0x0010
+#define MXR_VIDEO_CFG                  0x0014
+#define MXR_GRAPHIC0_CFG               0x0020
+#define MXR_GRAPHIC0_BASE              0x0024
+#define MXR_GRAPHIC0_SPAN              0x0028
+#define MXR_GRAPHIC0_SXY               0x002C
+#define MXR_GRAPHIC0_WH                        0x0030
+#define MXR_GRAPHIC0_DXY               0x0034
+#define MXR_GRAPHIC0_BLANK             0x0038
+#define MXR_GRAPHIC1_CFG               0x0040
+#define MXR_GRAPHIC1_BASE              0x0044
+#define MXR_GRAPHIC1_SPAN              0x0048
+#define MXR_GRAPHIC1_SXY               0x004C
+#define MXR_GRAPHIC1_WH                        0x0050
+#define MXR_GRAPHIC1_DXY               0x0054
+#define MXR_GRAPHIC1_BLANK             0x0058
+#define MXR_BG_CFG                     0x0060
+#define MXR_BG_COLOR0                  0x0064
+#define MXR_BG_COLOR1                  0x0068
+#define MXR_BG_COLOR2                  0x006C
+#define MXR_CM_COEFF_Y                 0x0080
+#define MXR_CM_COEFF_CB                        0x0084
+#define MXR_CM_COEFF_CR                        0x0088
+#define MXR_GRAPHIC0_BASE_S            0x2024
+#define MXR_GRAPHIC1_BASE_S            0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i)             (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i)            (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i)            (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i)             (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i)              (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i)             (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i)           (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i)          (0x2024 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+       (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+       (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST            (1 << 7)
+#define MXR_STATUS_BURST_MASK          (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN          (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK         (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE         (1 << 2)
+#define MXR_STATUS_REG_RUN             (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_RGB601_0_255           (0 << 9)
+#define MXR_CFG_RGB601_16_235          (1 << 9)
+#define MXR_CFG_RGB709_0_255           (2 << 9)
+#define MXR_CFG_RGB709_16_235          (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK           0x600
+#define MXR_CFG_OUT_YUV444             (0 << 8)
+#define MXR_CFG_OUT_RGB888             (1 << 8)
+#define MXR_CFG_OUT_MASK               (1 << 8)
+#define MXR_CFG_DST_SDO                        (0 << 7)
+#define MXR_CFG_DST_HDMI               (1 << 7)
+#define MXR_CFG_DST_MASK               (1 << 7)
+#define MXR_CFG_SCAN_HD_720            (0 << 6)
+#define MXR_CFG_SCAN_HD_1080           (1 << 6)
+#define MXR_CFG_GRP1_ENABLE            (1 << 5)
+#define MXR_CFG_GRP0_ENABLE            (1 << 4)
+#define MXR_CFG_VP_ENABLE              (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE         (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE       (1 << 2)
+#define MXR_CFG_SCAN_NTSC              (0 << 1)
+#define MXR_CFG_SCAN_PAL               (1 << 1)
+#define MXR_CFG_SCAN_SD                        (0 << 0)
+#define MXR_CFG_SCAN_HD                        (1 << 0)
+#define MXR_CFG_SCAN_MASK              0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE  (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL      (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN       (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN     (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x)      MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK                MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x)       MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x)          MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x)          MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x)            MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x)           MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x)              MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x)              MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x)              MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x)              MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC               (1 << 11)
+#define MXR_INT_EN_ALL                 (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC            (1 << 11)
+#define MXR_INT_STATUS_VSYNC           (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x)      MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x)      MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x)                MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644 (file)
index 0000000..10b737a
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE                      0x0000
+#define VP_SRESET                      0x0004
+#define VP_SHADOW_UPDATE               0x0008
+#define VP_FIELD_ID                    0x000C
+#define VP_MODE                                0x0010
+#define VP_IMG_SIZE_Y                  0x0014
+#define VP_IMG_SIZE_C                  0x0018
+#define VP_PER_RATE_CTRL               0x001C
+#define VP_TOP_Y_PTR                   0x0028
+#define VP_BOT_Y_PTR                   0x002C
+#define VP_TOP_C_PTR                   0x0030
+#define VP_BOT_C_PTR                   0x0034
+#define VP_ENDIAN_MODE                 0x03CC
+#define VP_SRC_H_POSITION              0x0044
+#define VP_SRC_V_POSITION              0x0048
+#define VP_SRC_WIDTH                   0x004C
+#define VP_SRC_HEIGHT                  0x0050
+#define VP_DST_H_POSITION              0x0054
+#define VP_DST_V_POSITION              0x0058
+#define VP_DST_WIDTH                   0x005C
+#define VP_DST_HEIGHT                  0x0060
+#define VP_H_RATIO                     0x0064
+#define VP_V_RATIO                     0x0068
+#define VP_POLY8_Y0_LL                 0x006C
+#define VP_POLY4_Y0_LL                 0x00EC
+#define VP_POLY4_C0_LL                 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+       (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+       (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON                   (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING           (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE                (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12                   (0 << 6)
+#define VP_MODE_NV21                   (1 << 6)
+#define VP_MODE_LINE_SKIP              (1 << 5)
+#define VP_MODE_MEM_LINEAR             (0 << 4)
+#define VP_MODE_MEM_TILED              (1 << 4)
+#define VP_MODE_FMT_MASK               (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC                 (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x)                        VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x)                        VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x)       VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE          (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644 (file)
index 0000000..754e14b
--- /dev/null
@@ -0,0 +1,27 @@
+config DRM_GMA500
+       tristate "Intel GMA5/600 KMS Framebuffer"
+       depends on DRM && PCI && X86 && EXPERIMENTAL
+       select FB_CFB_COPYAREA
+        select FB_CFB_FILLRECT
+        select FB_CFB_IMAGEBLIT
+        select DRM_KMS_HELPER
+        select DRM_TTM
+       help
+         Say yes for an experimental 2D KMS framebuffer driver for the
+         Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+         devices.
+
+config DRM_GMA600
+       bool "Intel GMA600 support (Experimental)"
+       depends on DRM_GMA500
+       help
+         Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+         platforms with LVDS ports. HDMI and MIPI are not currently
+         supported.
+
+config DRM_GMA3600
+       bool "Intel GMA3600/3650 support (Experimental)"
+       depends on DRM_GMA500
+       help
+         Say yes to include basic support for Intel GMA3600/3650 (Intel
+         Cedar Trail) platforms.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644 (file)
index 0000000..81c103b
--- /dev/null
@@ -0,0 +1,40 @@
+#
+#      KMS driver for the GMA500
+#
+ccflags-y += -Iinclude/drm
+
+gma500_gfx-y += gem_glue.o \
+         accel_2d.o \
+         backlight.o \
+         framebuffer.o \
+         gem.o \
+         gtt.o \
+         intel_bios.o \
+         intel_i2c.o \
+         intel_gmbus.o \
+         intel_opregion.o \
+         mmu.o \
+         power.o \
+         psb_drv.o \
+         psb_intel_display.o \
+         psb_intel_lvds.o \
+         psb_intel_modes.o \
+         psb_intel_sdvo.o \
+         psb_lid.o \
+         psb_irq.o \
+         psb_device.o \
+         mid_bios.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
+         cdv_intel_crt.o \
+         cdv_intel_display.o \
+         cdv_intel_hdmi.o \
+         cdv_intel_lvds.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+         oaktrail_crtc.o \
+         oaktrail_lvds.o \
+         oaktrail_hdmi.o \
+         oaktrail_hdmi_i2c.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644 (file)
index 0000000..d5ef1a5
--- /dev/null
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ *     psb_spank               -       reset the 2D engine
+ *     @dev_priv: our PSB DRM device
+ *
+ *     Soft reset the graphics engine and then reload the necessary registers.
+ *     We use this at initialisation time but it will become relevant for
+ *     accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+       PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+               _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+               _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+               _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+       PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+       msleep(1);
+
+       PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+       wmb();
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       wmb();
+       (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+       msleep(1);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+       PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ *     psb2_2d_wait_available  -       wait for FIFO room
+ *     @dev_priv: our DRM device
+ *     @size: size (in dwords) of the command we want to issue
+ *
+ *     Wait until there is room to load the FIFO with our data. If the
+ *     device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+                         unsigned size)
+{
+       uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+       unsigned long t = jiffies + HZ;
+
+       while (avail < size) {
+               avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+               if (time_after(jiffies, t)) {
+                       psb_spank(dev_priv);
+                       return -EIO;
+               }
+       }
+       return 0;
+}
+
+/**
+ *     psb_2d_submit           -       submit a 2D command
+ *     @dev_priv: our DRM device
+ *     @cmdbuf: command to issue
+ *     @size: length (in dwords)
+ *
+ *     Issue one or more 2D commands to the accelerator. This needs to be
+ *     serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+                                                               unsigned size)
+{
+       int ret = 0;
+       int i;
+       unsigned submit_size;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->lock_2d, flags);
+       while (size > 0) {
+               submit_size = (size < 0x60) ? size : 0x60;
+               size -= submit_size;
+               ret = psb_2d_wait_available(dev_priv, submit_size);
+               if (ret)
+                       break;
+
+               submit_size <<= 2;
+
+               for (i = 0; i < submit_size; i += 4)
+                       PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+               (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+       }
+       spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+       return ret;
+}
+
+
+/**
+ *     psb_accel_2d_copy_direction     -       compute blit order
+ *     @xdir: X direction of move
+ *     @ydir: Y direction of move
+ *
+ *     Compute the correct order setings to ensure that an overlapping blit
+ *     correctly copies all the pixels.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+       if (xdir < 0)
+               return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+                                               PSB_2D_COPYORDER_TR2BL;
+       else
+               return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+                                               PSB_2D_COPYORDER_TL2BR;
+}
+
+/**
+ *     psb_accel_2d_copy               -       accelerated 2D copy
+ *     @dev_priv: our DRM device
+ *     @src_offset in bytes
+ *     @src_stride in bytes
+ *     @src_format psb 2D format defines
+ *     @dst_offset in bytes
+ *     @dst_stride in bytes
+ *     @dst_format psb 2D format defines
+ *     @src_x offset in pixels
+ *     @src_y offset in pixels
+ *     @dst_x offset in pixels
+ *     @dst_y offset in pixels
+ *     @size_x of the copied area
+ *     @size_y of the copied area
+ *
+ *     Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+                            uint32_t src_offset, uint32_t src_stride,
+                            uint32_t src_format, uint32_t dst_offset,
+                            uint32_t dst_stride, uint32_t dst_format,
+                            uint16_t src_x, uint16_t src_y,
+                            uint16_t dst_x, uint16_t dst_y,
+                            uint16_t size_x, uint16_t size_y)
+{
+       uint32_t blit_cmd;
+       uint32_t buffer[10];
+       uint32_t *buf;
+       uint32_t direction;
+
+       buf = buffer;
+
+       direction =
+           psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_TR2BL) {
+               src_x += size_x - 1;
+               dst_x += size_x - 1;
+       }
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_BL2TR) {
+               src_y += size_y - 1;
+               dst_y += size_y - 1;
+       }
+
+       blit_cmd =
+           PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE |
+           PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+       *buf++ = PSB_2D_FENCE_BH;
+       *buf++ =
+           PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+                                              PSB_2D_DST_STRIDE_SHIFT);
+       *buf++ = dst_offset;
+       *buf++ =
+           PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+                                              PSB_2D_SRC_STRIDE_SHIFT);
+       *buf++ = src_offset;
+       *buf++ =
+           PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+           (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+       *buf++ = blit_cmd;
+       *buf++ =
+           (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+                                                 PSB_2D_DST_YSTART_SHIFT);
+       *buf++ =
+           (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+                                                 PSB_2D_DST_YSIZE_SHIFT);
+       *buf++ = PSB_2D_FLUSH_BH;
+
+       return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ *     psbfb_copyarea_accel    -       copyarea acceleration for /dev/fb
+ *     @info: our framebuffer
+ *     @a: copyarea parameters from the framebuffer core
+ *
+ *     Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+                                const struct fb_copyarea *a)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       uint32_t offset;
+       uint32_t stride;
+       uint32_t src_format;
+       uint32_t dst_format;
+
+       if (!fb)
+               return;
+
+       offset = psbfb->gtt->offset;
+       stride = fb->pitches[0];
+
+       switch (fb->depth) {
+       case 8:
+               src_format = PSB_2D_SRC_332RGB;
+               dst_format = PSB_2D_DST_332RGB;
+               break;
+       case 15:
+               src_format = PSB_2D_SRC_555RGB;
+               dst_format = PSB_2D_DST_555RGB;
+               break;
+       case 16:
+               src_format = PSB_2D_SRC_565RGB;
+               dst_format = PSB_2D_DST_565RGB;
+               break;
+       case 24:
+       case 32:
+               /* this is wrong but since we don't do blending its okay */
+               src_format = PSB_2D_SRC_8888ARGB;
+               dst_format = PSB_2D_DST_8888ARGB;
+               break;
+       default:
+               /* software fallback */
+               cfb_copyarea(info, a);
+               return;
+       }
+
+       if (!gma_power_begin(dev, false)) {
+               cfb_copyarea(info, a);
+               return;
+       }
+       psb_accel_2d_copy(dev_priv,
+                         offset, stride, src_format,
+                         offset, stride, dst_format,
+                         a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+       gma_power_end(dev);
+}
+
+/**
+ *     psbfb_copyarea  -       2D copy interface
+ *     @info: our framebuffer
+ *     @region: region to copy
+ *
+ *     Copy an area of the framebuffer console either by the accelerator
+ *     or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+                          const struct fb_copyarea *region)
+{
+       if (unlikely(info->state != FBINFO_STATE_RUNNING))
+               return;
+
+       /* Avoid the 8 pixel erratum */
+       if (region->width == 8 || region->height == 8 ||
+               (info->flags & FBINFO_HWACCEL_DISABLED))
+               return cfb_copyarea(info, region);
+
+       psbfb_copyarea_accel(info, region);
+}
+
+/**
+ *     psbfb_sync      -       synchronize 2D
+ *     @info: our framebuffer
+ *
+ *     Wait for the 2D engine to quiesce so that we can do CPU
+ *     access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long _end = jiffies + DRM_HZ;
+       int busy = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->lock_2d, flags);
+       /*
+        * First idle the 2D engine.
+        */
+
+       if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+           ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+               goto out;
+
+       do {
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+               cpu_relax();
+       } while (busy && !time_after_eq(jiffies, _end));
+
+       if (busy)
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+       if (busy)
+               goto out;
+
+       do {
+               busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                                               _PSB_C2B_STATUS_BUSY) != 0);
+               cpu_relax();
+       } while (busy && !time_after_eq(jiffies, _end));
+       if (busy)
+               busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                                       _PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+       spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+       return (busy) ? -EBUSY : 0;
+}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644 (file)
index 0000000..2079395
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return dev_priv->ops->backlight_init(dev);
+#else
+       return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->backlight_device) {
+               dev_priv->backlight_device->props.brightness = 0;
+               backlight_update_status(dev_priv->backlight_device);
+               backlight_device_unregister(dev_priv->backlight_device);
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644 (file)
index 0000000..4a5b099
--- /dev/null
@@ -0,0 +1,351 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX           0x3c4
+#define VGA_SR_DATA            0x3c5
+
+/*
+ *     Turn off the legacy VGA plane: blank the screen via sequencer
+ *     register SR01 (index 1, bit 5 = screen off), then disable the
+ *     VGA display plane and post the write with a read-back.
+ */
+static void cdv_disable_vga(struct drm_device *dev)
+{
+       u8 sr1;
+       u32 vga_reg;
+
+       vga_reg = VGACNTRL;
+
+       outb(1, VGA_SR_INDEX);          /* select SR01 */
+       sr1 = inb(VGA_SR_DATA);
+       outb(sr1 | 1<<5, VGA_SR_DATA);  /* set the screen-off bit */
+       udelay(300);
+
+       REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+       REG_READ(vga_reg);              /* posting read */
+}
+
+/*
+ *     Probe and register the Cedarview display outputs: always CRT and
+ *     LVDS, plus an HDMI encoder for each SDVO port whose detect bit
+ *     reports a device.
+ */
+static int cdv_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       cdv_disable_vga(dev);
+
+       cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+       cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+       /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
+          the HDMI interface */
+       if (REG_READ(SDVOB) & SDVO_DETECTED)
+               cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+       if (REG_READ(SDVOC) & SDVO_DETECTED)
+               cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+       return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *     Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+
+/* Last brightness level set through the backlight class (percent) */
+static int cdv_brightness;
+static struct backlight_device *cdv_backlight_device;
+
+/* Report the software-cached brightness level to the backlight core */
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return cdv_brightness;
+}
+
+
+/*
+ *     Validate that the BIOS-provided LVDS backlight PWM frequency,
+ *     derived from the core clock, lies within the programmable range
+ *     of the PWM register.  The computed value is currently only range
+ *     checked, not written to hardware (see FIXME below).
+ */
+static int cdv_backlight_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       /* u32 bl_max_freq; */
+       /* unsigned long value; */
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       /* get bl_max_freq and pol from dev_priv*/
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+               return -ENOENT;
+       }
+       bl_max_freq = dev_priv->lvds_bl->freq;
+       blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
+       /* NOTE(review): the multiply and final divide by the precision
+          factor cancel out; presumably kept for rounding parity with
+          other GMA500 variants — confirm before simplifying. */
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+                value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+                               return -ERANGE;
+       else {
+               /* FIXME */
+       }
+       return 0;
+}
+
+/*
+ *     Cache the requested brightness level.  The hardware write is not
+ *     yet implemented (see FIXME); levels are clamped to the documented
+ *     valid 1-100% range.  The upper clamp also guards against callers
+ *     bypassing the backlight core's max_brightness check.
+ */
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+       int level = bd->props.brightness;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+       if (level > 100)
+               level = 100;
+
+       /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+       cdv_brightness = level;
+       return 0;
+}
+
+/* Backlight class hooks backed by the cached-brightness helpers above */
+static const struct backlight_ops cdv_ops = {
+       .get_brightness = cdv_get_brightness,
+       .update_status  = cdv_set_brightness,
+};
+
+/*
+ *     Register the CDV backlight class device, validate the PWM setup,
+ *     and bring the panel up at full (100%) brightness.  On setup
+ *     failure the device is unregistered again and the error returned.
+ */
+static int cdv_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       cdv_backlight_device = backlight_device_register("psb-bl",
+                                       NULL, (void *)dev, &cdv_ops, &props);
+       if (IS_ERR(cdv_backlight_device))
+               return PTR_ERR(cdv_backlight_device);
+
+       ret = cdv_backlight_setup(dev);
+       if (ret < 0) {
+               backlight_device_unregister(cdv_backlight_device);
+               cdv_backlight_device = NULL;
+               return ret;
+       }
+       cdv_backlight_device->props.brightness = 100;
+       /* NOTE(review): max_brightness was already set via props above;
+          this re-assignment is redundant but harmless. */
+       cdv_backlight_device->props.max_brightness = 100;
+       backlight_update_status(cdv_backlight_device);
+       dev_priv->backlight_device = cdv_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Cedarview specific chip logic and low level methods
+ *     for power management
+ *
+ *     FIXME: we need to implement the apm/ospm base management bits
+ *     for this and the MID devices.
+ */
+
+/*
+ *     Read a 32bit value from the message bus via the PCI root bridge
+ *     mailbox: write the command to config offset 0xD0, read the data
+ *     back from offset 0xD4.
+ *     NOTE(review): pci_get_bus_and_slot() may return NULL — the config
+ *     accessors are assumed to tolerate that; confirm.
+ */
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+
+/*
+ *     Write a 32bit value to the message bus: stage the data in config
+ *     offset 0xD4, then issue the command (with all byte enables, 0xF0)
+ *     through offset 0xD0.
+ */
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
+
+#define PSB_APM_CMD                    0x0
+#define PSB_APM_STS                    0x04
+#define PSB_PM_SSC                     0x20
+#define PSB_PM_SSS                     0x30
+#define PSB_PWRGT_GFX_MASK             0x3
+#define CDV_PWRGT_DISPLAY_CNTR         0x000fc00c
+#define CDV_PWRGT_DISPLAY_STS          0x000fc00c
+
+/*
+ *     cdv_init_pm     -       initialise power management
+ *     @dev: our DRM device
+ *
+ *     Discover the APM and OSPM I/O bases from the punit, then force
+ *     both the graphics and display power islands on, polling each
+ *     status register up to 5 times (10us apart) for the gate bits to
+ *     clear.  Timeouts are silently ignored.
+ */
+static void cdv_init_pm(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_cnt;
+       int i;
+
+       dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+                                                       PSB_APMBA) & 0xFFFF;
+       dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+                                                       PSB_OSPMBA) & 0xFFFF;
+
+       /* Force power on for now */
+       pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+       pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+
+       outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+       for (i = 0; i < 5; i++) {
+               u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+               if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+                       break;
+               udelay(10);
+       }
+       /* Same dance for the display island via the OSPM registers */
+       pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+       pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+       outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+       for (i = 0; i < 5; i++) {
+               u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+                       break;
+               udelay(10);
+       }
+}
+
+/**
+ *     cdv_save_display_registers      -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend.  Currently a stub: nothing is saved yet.
+ *
+ *     FIXME: review
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+       return 0;
+}
+
+/**
+ *     cdv_restore_display_registers   -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ *     Currently a stub: nothing is restored yet.
+ *
+ *     FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* Power down the device (stub — CDV power management not implemented) */
+static int cdv_power_down(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* Power up the device (stub — CDV power management not implemented) */
+static int cdv_power_up(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+       uint32_t clock;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+       pci_read_config_dword(pci_root, 0xD4, &clock);
+       pci_dev_put(pci_root);
+
+       switch (clock & 0x07) {
+       case 0:
+               dev_priv->core_freq = 100;
+               break;
+       case 1:
+               dev_priv->core_freq = 133;
+               break;
+       case 2:
+               dev_priv->core_freq = 150;
+               break;
+       case 3:
+               dev_priv->core_freq = 178;
+               break;
+       case 4:
+               dev_priv->core_freq = 200;
+               break;
+       case 5:
+       case 6:
+       case 7:
+               dev_priv->core_freq = 266;
+       default:
+               dev_priv->core_freq = 0;
+       }
+}
+
+/*
+ *     One time chip setup: read the core clock, initialise the opregion
+ *     interface and parse the video BIOS tables.
+ */
+static int cdv_chip_setup(struct drm_device *dev)
+{
+       cdv_get_core_freq(dev);
+       gma_intel_opregion_init(dev);
+       psb_intel_init_bios(dev);
+       return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+/* Per-chip method table for Cedarview (GMA3600/3650) */
+const struct psb_ops cdv_chip_ops = {
+       .name = "GMA3600/3650",
+       .accel_2d = 0,          /* no 2D acceleration on this chip */
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = MRST_SGX_OFFSET,
+       .chip_setup = cdv_chip_setup,
+
+       .crtc_helper = &cdv_intel_helper_funcs,
+       .crtc_funcs = &cdv_intel_crtc_funcs,
+
+       .output_init = cdv_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = cdv_backlight_init,
+#endif
+
+       .init_pm = cdv_init_pm,
+       .save_regs = cdv_save_display_registers,
+       .restore_regs = cdv_restore_display_registers,
+       .power_down = cdv_power_down,
+       .power_up = cdv_power_up,
+};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644 (file)
index 0000000..2a88b7b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+                       int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc);
+
+/*
+ *     Crude vblank wait: delay one full 50Hz refresh period.
+ *     Was "extern inline", which in the kernel's gnu89 dialect emits no
+ *     out-of-line definition (link failure if not inlined) — headers
+ *     must use static inline.
+ */
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+       /* Wait for 20ms, i.e. one cycle at 50hz. */
+        /* FIXME: msleep ?? */
+       mdelay(20);
+}
+
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644 (file)
index 0000000..6d0f10b
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+
+/*
+ *     Apply a DPMS state to the analog port: clear the DAC enable and
+ *     sync disable bits in ADPA, then set the combination the requested
+ *     mode calls for (standby/suspend keep the DAC on but kill one sync;
+ *     off kills both syncs and the DAC).
+ */
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       u32 temp, reg;
+       reg = ADPA;
+
+       temp = REG_READ(reg);
+       temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+       temp &= ~ADPA_DAC_ENABLE;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               temp |= ADPA_DAC_ENABLE;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       }
+
+       REG_WRITE(reg, temp);
+}
+
+/*
+ *     Validate a display mode against the CRT output limits:
+ *     no doublescan, dot clock within 20MHz-355MHz, and resolution
+ *     no larger than 1680x1050.
+ */
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* The lowest clock for CDV is 20000KHz */
+       if (mode->clock < 20000)
+               return MODE_CLOCK_LOW;
+
+       /* The max clock for CDV is 355MHz instead of 400MHz */
+       if (mode->clock > 355000)
+               return MODE_CLOCK_HIGH;
+
+       if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+               return MODE_PANEL;
+
+       return MODE_OK;
+}
+
+/* No mode adjustment is needed for the CRT path; accept every mode */
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/*
+ *     Program the analog port for the new mode: clear the UDI clone
+ *     multiplier in the pipe's DPLL_MD register, then build the ADPA
+ *     value from the adjusted mode's sync polarities and the pipe the
+ *     encoder is attached to.
+ */
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *psb_intel_crtc =
+                                       to_psb_intel_crtc(crtc);
+       int dpll_md_reg;
+       u32 adpa, dpll_md;
+       u32 adpa_reg;
+
+       if (psb_intel_crtc->pipe == 0)
+               dpll_md_reg = DPLL_A_MD;
+       else
+               dpll_md_reg = DPLL_B_MD;
+
+       adpa_reg = ADPA;
+
+       /*
+        * Disable separate mode multiplier used when cloning SDVO to CRT
+        * XXX this needs to be adjusted when we really are cloning
+        */
+       {
+               dpll_md = REG_READ(dpll_md_reg);
+               REG_WRITE(dpll_md_reg,
+                          dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+       }
+
+       adpa = 0;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+       if (psb_intel_crtc->pipe == 0)
+               adpa |= ADPA_PIPE_A_SELECT;
+       else
+               adpa |= ADPA_PIPE_B_SELECT;
+
+       REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+/* NOTE(review): @force is currently unused in this implementation */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+                                                               bool force)
+{
+       struct drm_device *dev = connector->dev;
+       u32 hotplug_en;
+       int i, tries = 0, ret = false;
+       u32 adpa_orig;
+
+       /* disable the DAC when doing the hotplug detection */
+
+       adpa_orig = REG_READ(ADPA);
+
+       REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+
+       /*
+        * On CDV, the CRT detect sequence needs to be done twice
+        * to get a reliable result.
+        */
+       tries = 2;
+
+       hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+       hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+       hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+       hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+       hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+       for (i = 0; i < tries ; i++) {
+               unsigned long timeout;
+               /* turn on the FORCE_DETECT */
+               REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+               timeout = jiffies + msecs_to_jiffies(1000);
+               /* wait for FORCE_DETECT to go off */
+               do {
+                       if (!(REG_READ(PORT_HOTPLUG_EN) &
+                                       CRT_HOTPLUG_FORCE_DETECT))
+                               break;
+                       msleep(1);
+               } while (time_after(timeout, jiffies));
+       }
+
+       /* A monitor status other than NONE means something is attached */
+       if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+           CRT_HOTPLUG_MONITOR_NONE)
+               ret = true;
+
+       /* Restore the saved ADPA */
+       REG_WRITE(ADPA, adpa_orig);
+       return ret;
+}
+
+/* Map the hotplug probe result onto the DRM connector status values */
+static enum drm_connector_status cdv_intel_crt_detect(
+                               struct drm_connector *connector, bool force)
+{
+       return cdv_intel_crt_detect_hotplug(connector, force) ?
+               connector_status_connected : connector_status_disconnected;
+}
+
+/*
+ *     Tear down the CRT connector: release its DDC i2c bus, remove the
+ *     sysfs entry and free the connector object.
+ */
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+/* Fetch the monitor's mode list over the attached encoder's DDC bus */
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                               psb_intel_attached_encoder(connector);
+       return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+}
+
+/* The CRT connector exposes no settable properties; accept and ignore */
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+                                 struct drm_property *property,
+                                 uint64_t value)
+{
+       return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+/* Encoder helper callbacks for the analog output */
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+       .dpms = cdv_intel_crt_dpms,
+       .mode_fixup = cdv_intel_crt_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .commit = psb_intel_encoder_commit,
+       .mode_set = cdv_intel_crt_mode_set,
+};
+
+/* Connector callbacks: detection, probing and teardown */
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = cdv_intel_crt_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = cdv_intel_crt_destroy,
+       .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+                               cdv_intel_crt_connector_helper_funcs = {
+       .mode_valid = cdv_intel_crt_mode_valid,
+       .get_modes = cdv_intel_crt_get_modes,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+/* Release the DRM-level encoder state; the containing object is freed
+   by the connector destroy path */
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+       .destroy = cdv_intel_crt_enc_destroy,
+};
+
+/*
+ *     cdv_intel_crt_init      -       register the VGA/CRT output
+ *     @dev: our DRM device
+ *     @mode_dev: mode device container
+ *
+ *     Allocate and wire up the encoder/connector pair for the analog
+ *     port, create its DDC bus on GPIOA and publish the connector in
+ *     sysfs.  Failures unwind via the goto labels; allocation failure
+ *     is silent apart from the DDC error message.
+ */
+void cdv_intel_crt_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev)
+{
+
+       struct psb_intel_connector *psb_intel_connector;
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       u32 i2c_reg;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       drm_connector_init(dev, connector,
+               &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+       encoder = &psb_intel_encoder->base;
+       drm_encoder_init(dev, encoder,
+               &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+
+       /* Set up the DDC bus. */
+       i2c_reg = GPIOA;
+       /* Remove the following code for CDV */
+       /*
+       if (dev_priv->crt_ddc_bus != 0)
+               i2c_reg = dev_priv->crt_ddc_bus;
+       }*/
+       psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+                                                         i2c_reg, "CRTDDC_A");
+       if (!psb_intel_encoder->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               goto failed_ddc;
+       }
+
+       psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+       /*
+       psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+       psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+       */
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+       drm_connector_helper_add(connector,
+                                       &cdv_intel_crt_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+
+       return;
+failed_ddc:
+       drm_encoder_cleanup(&psb_intel_encoder->base);
+       drm_connector_cleanup(&psb_intel_connector->base);
+       kfree(psb_intel_connector);
+failed_connector:
+       kfree(psb_intel_encoder);
+       return;
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644 (file)
index 0000000..18d1152
--- /dev/null
@@ -0,0 +1,1508 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+/* Inclusive [min, max] bounds for one PLL parameter */
+struct cdv_intel_range_t {
+       int min, max;
+};
+
+/* P2 divider selection: slow value below dot_limit, fast value above */
+struct cdv_intel_p2_t {
+       int dot_limit;
+       int p2_slow, p2_fast;
+};
+
+/* A candidate PLL configuration */
+struct cdv_intel_clock_t {
+       /* given values */
+       int n;
+       int m1, m2;
+       int p1, p2;
+       /* derived values */
+       int dot;
+       int vco;
+       int m;
+       int p;
+};
+
+#define INTEL_P2_NUM                 2
+
+/* Full set of PLL parameter limits for one output/refclk combination */
+struct cdv_intel_limit_t {
+       struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+       struct cdv_intel_p2_t p2;
+};
+
+/* Indices into cdv_intel_limits[] below */
+#define CDV_LIMIT_SINGLE_LVDS_96       0
+#define CDV_LIMIT_SINGLE_LVDS_100      1
+#define CDV_LIMIT_DAC_HDMI_27          2
+#define CDV_LIMIT_DAC_HDMI_96          3
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+       {                       /* CDV_SINGLE_LVDS_96MHz */
+        .dot = {.min = 20000, .max = 115500},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 28, .max = 140},
+        .p1 = {.min = 2, .max = 10},
+        .p2 = {.dot_limit = 200000,
+               .p2_slow = 14, .p2_fast = 14},
+        },
+       {                       /* CDV_SINGLE_LVDS_100MHz */
+        .dot = {.min = 20000, .max = 115500},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 28, .max = 140},
+        .p1 = {.min = 2, .max = 10},
+        /* The single-channel range is 25-112Mhz, and dual-channel
+         * is 80-224Mhz.  Prefer single channel as much as possible.
+         */
+        .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+        },
+       {                       /* CDV_DAC_HDMI_27MHz */
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1809000, .max = 3564000},
+        .n = {.min = 1, .max = 1},
+        .m = {.min = 67, .max = 132},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 65, .max = 130},
+        .p = {.min = 5, .max = 90},
+        .p1 = {.min = 1, .max = 9},
+        .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+        },
+       {                       /* CDV_DAC_HDMI_96MHz */
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 5, .max = 100},
+        .p1 = {.min = 1, .max = 10},
+        .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+        },
+};
+
+/*
+ * Poll until COND becomes true or MS milliseconds elapse.  Evaluates to
+ * 0 on success or -ETIMEDOUT.  W is the per-iteration sleep in ms (0
+ * busy-spins; sleeping is skipped while the kernel debugger is active).
+ */
+#define _wait_for(COND, MS, W) ({ \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+       int ret__ = 0;                                                  \
+       while (!(COND)) {                                               \
+               if (time_after(jiffies, timeout__)) {                   \
+                       ret__ = -ETIMEDOUT;                             \
+                       break;                                          \
+               }                                                       \
+               if (W && !in_dbg_master())                              \
+                       msleep(W);                                      \
+       }                                                               \
+       ret__;                                                          \
+})
+
+/* Common case: poll with a 1ms sleep between checks */
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+
+/*
+ *     Read a DPLL register over the sideband bus: wait for the bus to
+ *     idle, issue the read packet, wait for completion, then fetch the
+ *     data word.  Returns 0 or -ETIMEDOUT.
+ */
+static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+       int ret;
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle before read\n");
+               return ret;
+       }
+
+       REG_WRITE(SB_ADDR, reg);
+       REG_WRITE(SB_PCKT,
+                  SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+                  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+                  SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle after read\n");
+               return ret;
+       }
+
+       *val = REG_READ(SB_DATA);
+
+       return 0;
+}
+
+/*
+ *     Write a DPLL register over the sideband bus.  Mirrors the read
+ *     path: idle-wait, stage address and data, issue the write packet,
+ *     idle-wait again.  Returns 0 or -ETIMEDOUT.
+ *     NOTE(review): dpio_debug is statically enabled, so every write
+ *     also performs before/after reads and emits KMS debug output.
+ */
+static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+       int ret;
+       static bool dpio_debug = true;
+       u32 temp;
+
+       if (dpio_debug) {
+               if (cdv_sb_read(dev, reg, &temp) == 0)
+                       DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+               DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+       }
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle before write\n");
+               return ret;
+       }
+
+       REG_WRITE(SB_ADDR, reg);
+       REG_WRITE(SB_DATA, val);
+       REG_WRITE(SB_PCKT,
+                  SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+                  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+                  SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle after write\n");
+               return ret;
+       }
+
+       if (dpio_debug) {
+               if (cdv_sb_read(dev, reg, &temp) == 0)
+                       DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+       }
+
+       return 0;
+}
+
+/* Reset the DPIO configuration register.  The BIOS does this at every
+ * mode set.
+ */
+static void cdv_sb_reset(struct drm_device *dev)
+{
+
+       REG_WRITE(DPIO_CFG, 0);
+       REG_READ(DPIO_CFG);     /* posting read to flush before re-enable */
+       REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus.  They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ *
+ * Programs the M/N/VCO/P divider registers for @crtc's pipe from
+ * @clock via sideband reads/modifies/writes, and (for pipe A) powers
+ * up the PLL on the four HDMI lane registers.  Returns 0 on success
+ * or a negative errno from a failed sideband transfer.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+                              struct cdv_intel_clock_t *clock)
+{
+       struct psb_intel_crtc *psb_crtc =
+                               to_psb_intel_crtc(crtc);
+       int pipe = psb_crtc->pipe;
+       u32 m, n_vco, p;
+       int ret = 0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       u32 ref_value;
+
+       cdv_sb_reset(dev);
+
+       /* Sideband access needs the DPLL refclk running; bail out early
+        * rather than programming dividers that cannot take effect. */
+       if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+               DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+               return -EBUSY;
+       }
+
+       /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+       ref_value = 0x68A701;
+
+       /* NOTE(review): unlike the divider writes below, the return value
+        * of this write is ignored — confirm a failure here is benign. */
+       cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+       /* We don't know what the other fields of these regs are, so
+        * leave them in place.
+        */
+       ret = cdv_sb_read(dev, SB_M(pipe), &m);
+       if (ret)
+               return ret;
+       m &= ~SB_M_DIVIDER_MASK;
+       m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+       ret = cdv_sb_write(dev, SB_M(pipe), m);
+       if (ret)
+               return ret;
+
+       ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+       if (ret)
+               return ret;
+
+       /* Follow the BIOS to program the N_DIVIDER REG */
+       n_vco &= 0xFFFF;
+       n_vco |= 0x107;
+       n_vco &= ~(SB_N_VCO_SEL_MASK |
+                  SB_N_DIVIDER_MASK |
+                  SB_N_CB_TUNE_MASK);
+
+       n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
+       /* CB tune / VCO select depend on the VCO frequency band (kHz). */
+       if (clock->vco < 2250000) {
+               n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+       } else if (clock->vco < 2750000) {
+               n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+       } else if (clock->vco < 3300000) {
+               n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+       } else {
+               n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+       }
+
+       ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+       if (ret)
+               return ret;
+
+       ret = cdv_sb_read(dev, SB_P(pipe), &p);
+       if (ret)
+               return ret;
+       p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+       p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+       /* p2 is encoded, not stored raw — map the divisor to its code. */
+       switch (clock->p2) {
+       case 5:
+               p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+               break;
+       case 10:
+               p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+               break;
+       case 14:
+               p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+               break;
+       case 7:
+               p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+               break;
+       default:
+               DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+               return -EINVAL;
+       }
+       ret = cdv_sb_write(dev, SB_P(pipe), p);
+       if (ret)
+               return ret;
+
+       /* always Program the Lane Register for the Pipe A*/
+       if (pipe == 0) {
+               /* Program the Lane0/1 for HDMI B */
+               /* NOTE(review): sideband errors are ignored for all four
+                * lane read-modify-writes below — confirm intentional. */
+               u32 lane_reg, lane_value;
+
+               lane_reg = PSB_LANE0;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               lane_reg = PSB_LANE1;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               /* Program the Lane2/3 for HDMI C */
+               lane_reg = PSB_LANE2;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               lane_reg = PSB_LANE3;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+       }
+
+       return 0;
+}
+
+/*
+ * Returns whether any encoder on the specified pipe is of the specified type
+ *
+ * Walks every connector in the device's mode config; a connector counts
+ * when it has an encoder and that encoder is attached to @crtc.
+ */
+bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *l_entry;
+
+       list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+               if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+                       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(l_entry);
+                       if (psb_intel_encoder->type == type)
+                               return true;
+               }
+       }
+       return false;
+}
+
+/* Pick the PLL divider limit table for @crtc based on output type
+ * (LVDS vs DAC/HDMI) and the reference clock frequency in kHz.
+ */
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+                                                       int refclk)
+{
+       const struct cdv_intel_limit_t *limit;
+       if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               /*
+                * Now only single-channel LVDS is supported on CDV. If it is
+                * incorrect, please add the dual-channel LVDS.
+                */
+               if (refclk == 96000)
+                       limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+               else
+                       limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+       } else {
+               /* non-LVDS: 27 MHz refclk (CRT/HDMI) or the 96 MHz default */
+               if (refclk == 27000)
+                       limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+               else
+                       limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+       }
+       return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter.  Derive the
+ * composite multiplier/divider values and the resulting VCO and dot
+ * clocks from the raw m2/n/p1/p2 fields in @clock.
+ */
+static void cdv_intel_clock(struct drm_device *dev,
+                       int refclk, struct cdv_intel_clock_t *clock)
+{
+       clock->m = 2 + clock->m2;       /* effective M is m2 + 2 (m1 == 0) */
+       clock->p = clock->p2 * clock->p1;
+       clock->vco = (refclk * clock->m) / clock->n;
+       clock->dot = clock->vco / clock->p;
+}
+
+
+/* Helper macro: reject the candidate clock with an early `return false`.
+ * The string argument is kept only for the (disabled) debug print. */
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/* Validate a candidate divider combination against the limit table.
+ * Only p1, composite p, vco and dot are range-checked here; m/n were
+ * constrained by the search loops that generated the candidate.
+ */
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+                               const struct cdv_intel_limit_t *limit,
+                              struct cdv_intel_clock_t *clock)
+{
+       if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+               INTELPllInvalid("p1 out of range\n");
+       if (clock->p < limit->p.min || limit->p.max < clock->p)
+               INTELPllInvalid("p out of range\n");
+       /* unnecessary to check the range of m(m1/M2)/n again */
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock"
+        * depending on the multiplier, connector, etc.,
+        * rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid("dot out of range\n");
+
+       return true;
+}
+
+/* Exhaustively search the n/m2/p1 divider ranges in the limit table
+ * for the combination whose dot clock is closest to @target (kHz).
+ * The winner is left in @best_clock; returns true iff some valid
+ * candidate beat the initial error of @target (i.e. anything found).
+ */
+static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+                               int refclk,
+                               struct cdv_intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct cdv_intel_clock_t clock;
+       const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+       int err = target;       /* best error so far; start pessimistic */
+
+
+       if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+               /*
+                * For LVDS, if the panel is on, just rely on its current
+                * settings for dual-channel.  We haven't figured out how to
+                * reliably set up different single/dual channel state, if we
+                * even can.
+                */
+               if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               /* choose the p2 divisor by which side of the dot-clock
+                * threshold the target falls on */
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+
+       memset(best_clock, 0, sizeof(*best_clock));
+       clock.m1 = 0;
+       /* m1 is reserved as 0 in CDV, n is a ring counter.
+          So skip the m1 loop */
+       for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+               for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+                                            clock.m2++) {
+                       for (clock.p1 = limit->p1.min;
+                                       clock.p1 <= limit->p1.max;
+                                       clock.p1++) {
+                               int this_err;
+
+                               cdv_intel_clock(dev, refclk, &clock);
+
+                               if (!cdv_intel_PLL_is_valid(crtc,
+                                                               limit, &clock))
+                                               continue;
+
+                               this_err = abs(clock.dot - target);
+                               if (this_err < err) {
+                                       *best_clock = clock;
+                                       err = this_err;
+                               }
+                       }
+               }
+       }
+
+       return err != target;
+}
+
+/* Point the display plane of @crtc's pipe at the currently attached
+ * framebuffer, panned to (@x, @y), pinning it into the GTT and
+ * unpinning @old_fb afterwards.  Returns 0 on success or a negative
+ * errno (pin failure, unsupported pixel format).
+ */
+int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+                           int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+       int pipe = psb_intel_crtc->pipe;
+       unsigned long start, offset;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr;
+       int ret = 0;
+
+       /* hardware may be powered down; nothing to do in that case */
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               dev_err(dev->dev, "No FB bound\n");
+               /* NOTE(review): this path still unpins old_fb and returns
+                * ret == 0 (success) — confirm that is intended. */
+               goto psb_intel_pipe_cleaner;
+       }
+
+
+       /* We are displaying this buffer, make sure it is actually loaded
+          into the GTT */
+       ret = psb_gtt_pin(psbfb->gtt);
+       if (ret < 0)
+               goto psb_intel_pipe_set_base_exit;
+       start = psbfb->gtt->offset;
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown color depth\n");
+               ret = -EINVAL;
+               goto psb_intel_pipe_set_base_exit;
+       }
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       dev_dbg(dev->dev,
+               "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+       /* base = pan offset within the fb, surf = GTT base of the fb;
+        * each write is posted with a read-back */
+       REG_WRITE(dspbase, offset);
+       REG_READ(dspbase);
+       REG_WRITE(dspsurf, start);
+       REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+       /* If there was a previous display we can now unpin it */
+       if (old_fb)
+               psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+       gma_power_end(dev);
+       return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ *
+ * Power-up order: DPLL -> plane -> pipe.  Power-down order: pipe ->
+ * plane -> DPLL, with vblank waits in between so each stage settles.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       /* write current value first, then enable the VCO
+                        * twice with settle delays between each step */
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+
+               /* Jim Bish - switch plan and pipe per scott */
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               udelay(150);
+
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+               /* Jim Bish - changed pipe/plane here as well. */
+
+               /* Wait for vblank for the disable to take effect */
+               cdv_intel_wait_for_vblank(dev);
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+
+               /* Wait for vblank for the disable to take effect. */
+               cdv_intel_wait_for_vblank(dev);
+
+               udelay(150);
+
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+       /* NOTE(review): `enabled` is computed but never used below —
+        * dead store, candidate for removal. */
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+       /*Set FIFO Watermarks*/
+       REG_WRITE(DSPARB, 0x3F3E);
+}
+
+/* CRTC helper .prepare hook: power the pipe down before a mode set. */
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *helpers = crtc->helper_private;
+
+       helpers->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* CRTC helper .commit hook: power the pipe back up after a mode set. */
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *helpers = crtc->helper_private;
+
+       helpers->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/* Encoder helper .prepare hook: DPMS the encoder off before a mode set.
+ * lvds has its own version of prepare see cdv_intel_lvds_prepare */
+void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *helpers = encoder->helper_private;
+
+       helpers->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* Encoder helper .commit hook: DPMS the encoder back on after a mode set.
+ * lvds has its own version of commit see cdv_intel_lvds_commit */
+void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *helpers = encoder->helper_private;
+
+       helpers->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+/* CRTC helper .mode_fixup hook: no adjustment is needed on CDV, so
+ * every requested mode is accepted unchanged. */
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       /* extract the 2-bit pipe-select field at bits 30:29 */
+       return (pfit_control >> 29) & 0x3;
+}
+
+/* CRTC helper .mode_set hook: program the DPLL (via the sideband bus),
+ * timing registers, plane and pipe for @mode on @crtc's pipe.  The LVDS
+ * port is powered up here as an exception to the usual "mode_set turns
+ * nothing on" rule.  Returns 0 on success or -EBUSY if the DPLL never
+ * reports lock.
+ */
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk;
+       struct cdv_intel_clock_t clock;
+       u32 dpll = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       bool is_hdmi = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       /* Classify the outputs currently driven by this crtc. */
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+               if (!connector->encoder
+                   || connector->encoder->crtc != crtc)
+                       continue;
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               case INTEL_OUTPUT_HDMI:
+                       is_hdmi = true;
+                       break;
+               }
+       }
+
+       refclk = 96000;
+
+       /* Hack selection about ref clk for CRT */
+       /* Select 27MHz as the reference clk for HDMI */
+       if (is_crt || is_hdmi)
+               refclk = 27000;
+
+       drm_mode_debug_printmodeline(adjusted_mode);
+
+       ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+                                &clock);
+       if (!ok) {
+               /* NOTE(review): returns 0 (success) even though no PLL
+                * setting was found and the mode is not applied — confirm
+                * whether an error code was intended here. */
+               dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+               return 0;
+       }
+
+       dpll = DPLL_VGA_MODE_DIS;
+       if (is_tv) {
+               /* XXX: just matching BIOS for now */
+/*     dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       }
+       /* NOTE(review): misleading indentation — the next statement is
+        * unconditional, not part of the is_tv branch above. */
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       dpll |= DPLL_SYNCLOCK_ENABLE;
+       dpll |= DPLL_VGA_MODE_DIS;
+       if (is_lvds)
+               dpll |= DPLLB_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+       /* dpll |= (2 << 11); */
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+       pipeconf |= PIPEACONF_ENABLE;
+
+       /* Turn the refclk on (but not the VCO) so the sideband dividers
+        * can be programmed, per the Cedarview DPLL ordering rule. */
+       REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+       REG_READ(dpll_reg);
+
+       cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+
+       udelay(150);
+
+
+       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+        * This is an exception to the general rule that mode_set doesn't turn
+        * things on.
+        */
+       if (is_lvds) {
+               u32 lvds = REG_READ(LVDS);
+
+               lvds |=
+                   LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+                   LVDS_PIPEB_SELECT;
+               /* Set the B0-B3 data pairs corresponding to
+                * whether we're going to
+                * set the DPLLs for dual-channel mode or not.
+                */
+               if (clock.p2 == 7)
+                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+               else
+                       lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+                * appropriately here, but we need to look more
+                * thoroughly into how panels behave in the two modes.
+                */
+
+               REG_WRITE(LVDS, lvds);
+               REG_READ(LVDS);
+       }
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+               REG_WRITE(PFIT_CONTROL, 0);
+
+       DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+       drm_mode_debug_printmodeline(mode);
+
+       REG_WRITE(dpll_reg,
+               (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
+
+       if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+               dev_err(dev->dev, "Failed to get DPLL lock\n");
+               return -EBUSY;
+       }
+
+       {
+               /* Program the pixel multiplier for SDVO-style outputs. */
+               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+               REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+       }
+
+       /* Timing registers store (value - 1); low half = active/start,
+        * high half = total/end. */
+       REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                 ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                 ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       /* pipesrc and dspsize control the size that is scaled from,
+        * which should always be the user's requested size.
+        */
+       REG_WRITE(dspsize_reg,
+                 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+       REG_WRITE(pipesrc_reg,
+                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs =
+                   crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       cdv_intel_wait_for_vblank(dev);
+
+       return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values
+ *
+ * Each palette entry packs (r + adj) << 16 | (g + adj) << 8 | (b + adj)
+ * from the per-crtc lut arrays.  If the hardware cannot be powered on,
+ * the values are stashed in dev_priv for a later restore instead.
+ */
+void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv =
+                               (struct drm_psb_private *)dev->dev_private;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int palreg = PALETTE_A;
+       int i;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled)
+               return;
+
+       switch (psb_intel_crtc->pipe) {
+       case 0:
+               break;
+       case 1:
+               palreg = PALETTE_B;
+               break;
+       case 2:
+               palreg = PALETTE_C;
+               break;
+       default:
+               dev_err(dev->dev, "Illegal Pipe Number.\n");
+               return;
+       }
+
+       if (gma_power_begin(dev, false)) {
+               for (i = 0; i < 256; i++) {
+                       REG_WRITE(palreg + 4 * i,
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]));
+               }
+               gma_power_end(dev);
+       } else {
+               /* NOTE(review): the powered-off fallback always writes
+                * save_palette_a regardless of which pipe this crtc is on
+                * — confirm pipes B/C should not have their own save area. */
+               for (i = 0; i < 256; i++) {
+                       dev_priv->save_palette_a[i] =
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]);
+               }
+
+       }
+}
+
+/**
+ * Save HW states of giving crtc
+ *
+ * Snapshots the pipe/plane/PLL/timing registers and the 256-entry
+ * palette into psb_intel_crtc->crtc_state for a later restore.
+ * Register selection is A-vs-B only, keyed on pipe == 0.
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_dbg(dev->dev, "No CRTC state found\n");
+               return;
+       }
+
+       crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+       crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+       crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+       crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+       crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+       crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+       crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+       crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+       crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+       crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+       crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+       crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+       crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+       /*NOTE: DSPSIZE DSPPOS only for psb*/
+       crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+       crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+       crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+       DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+                       crtc_state->saveDSPCNTR,
+                       crtc_state->savePIPECONF,
+                       crtc_state->savePIPESRC,
+                       crtc_state->saveFP0,
+                       crtc_state->saveFP1,
+                       crtc_state->saveDPLL,
+                       crtc_state->saveHTOTAL,
+                       crtc_state->saveHBLANK,
+                       crtc_state->saveHSYNC,
+                       crtc_state->saveVTOTAL,
+                       crtc_state->saveVBLANK,
+                       crtc_state->saveVSYNC,
+                       crtc_state->saveDSPSTRIDE,
+                       crtc_state->saveDSPSIZE,
+                       crtc_state->saveDSPPOS,
+                       crtc_state->saveDSPBASE
+               );
+
+       /* NOTE(review): only PALETTE_A/PALETTE_B are handled here; a crtc
+        * on pipe 2 would save PALETTE_B — confirm pipe C is impossible
+        * on this path. */
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * cdv_intel_crtc_restore - restore HW state of the given crtc
+ *
+ * Re-program the pipe from the state captured by cdv_intel_crtc_save():
+ * PLL first (dividers loaded with the VCO disabled), then timing, plane
+ * and pipe registers, and finally the gamma palette.
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       /* No saved state (save was never called for this crtc) */
+       if (!crtc_state) {
+               dev_dbg(dev->dev, "No crtc state\n");
+               return;
+       }
+
+       /* Dump current vs. saved register values for debugging */
+       DRM_DEBUG(
+               "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+               REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+               REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+               REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+               REG_READ(pipeA ? FPA0 : FPB0),
+               REG_READ(pipeA ? FPA1 : FPB1),
+               REG_READ(pipeA ? DPLL_A : DPLL_B),
+               REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+               REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+               REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+               REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+               REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+               REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+               REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+               REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+               REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+               REG_READ(pipeA ? DSPABASE : DSPBBASE)
+               );
+
+       DRM_DEBUG(
+               "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+               crtc_state->saveDSPCNTR,
+               crtc_state->savePIPECONF,
+               crtc_state->savePIPESRC,
+               crtc_state->saveFP0,
+               crtc_state->saveFP1,
+               crtc_state->saveDPLL,
+               crtc_state->saveHTOTAL,
+               crtc_state->saveHBLANK,
+               crtc_state->saveHSYNC,
+               crtc_state->saveVTOTAL,
+               crtc_state->saveVBLANK,
+               crtc_state->saveVSYNC,
+               crtc_state->saveDSPSTRIDE,
+               crtc_state->saveDSPSIZE,
+               crtc_state->saveDSPPOS,
+               crtc_state->saveDSPBASE
+               );
+
+
+       /* If the PLL was running, write it back with the VCO disabled
+        * first so the dividers can be reloaded safely */
+       if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+               REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+                       crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+               REG_READ(pipeA ? DPLL_A : DPLL_B);
+               DRM_DEBUG("write dpll: %x\n",
+                               REG_READ(pipeA ? DPLL_A : DPLL_B));
+               udelay(150);
+       }
+
+       REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+       REG_READ(pipeA ? FPA0 : FPB0);
+
+       REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+       REG_READ(pipeA ? FPA1 : FPB1);
+
+       /* Re-enable the PLL and let it settle before touching timings */
+       REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+       REG_READ(pipeA ? DPLL_A : DPLL_B);
+       udelay(150);
+
+       REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+       REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+       REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+       REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+       REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+       REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+       REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+       REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+       REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+       REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+       REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       /* Enable the plane only after the pipe is up again */
+       REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       /* Finally restore the 256-entry gamma palette */
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+/*
+ * Set (or clear, when handle == 0) the hardware cursor image for a crtc.
+ *
+ * Fixes over the previous version:
+ *  - the reference taken by drm_gem_object_lookup() was leaked on the
+ *    size-check and pin-failure error paths;
+ *  - the new cursor object was only stored when an old cursor existed,
+ *    so the very first cursor set lost track of its GEM object.
+ */
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+       uint32_t temp;
+       size_t addr = 0;
+       struct gtt_range *gt;
+       struct drm_gem_object *obj;
+       int ret;
+
+       /* if we want to turn off the cursor ignore width and height */
+       if (!handle) {
+               /* turn off the cursor */
+               temp = CURSOR_MODE_DISABLE;
+
+               if (gma_power_begin(dev, false)) {
+                       REG_WRITE(control, temp);
+                       REG_WRITE(base, 0);
+                       gma_power_end(dev);
+               }
+
+               /* unpin and release the old GEM object */
+               if (psb_intel_crtc->cursor_obj) {
+                       gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+                       psb_gtt_unpin(gt);
+                       drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+                       psb_intel_crtc->cursor_obj = NULL;
+               }
+
+               return 0;
+       }
+
+       /* Currently we only support 64x64 cursors */
+       if (width != 64 || height != 64) {
+               dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj)
+               return -ENOENT;
+
+       /* ARGB cursor needs 4 bytes per pixel */
+       if (obj->size < width * height * 4) {
+               dev_dbg(dev->dev, "buffer is too small\n");
+               ret = -ENOMEM;
+               goto unref_cursor;
+       }
+
+       gt = container_of(obj, struct gtt_range, gem);
+
+       /* Pin the memory into the GTT */
+       ret = psb_gtt_pin(gt);
+       if (ret) {
+               dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+               goto unref_cursor;
+       }
+
+       addr = gt->offset;      /* Or resource.start ??? */
+
+       psb_intel_crtc->cursor_addr = addr;
+
+       temp = 0;
+       /* set the pipe for the cursor */
+       temp |= (pipe << 28);
+       temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(control, temp);
+               REG_WRITE(base, addr);
+               gma_power_end(dev);
+       }
+
+       /* unpin and release the old GEM object, if any */
+       if (psb_intel_crtc->cursor_obj) {
+               gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+               psb_gtt_unpin(gt);
+               drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+       }
+       /* Remember the new cursor unconditionally (previously only done
+        * when an old cursor existed, dropping the first reference) */
+       psb_intel_crtc->cursor_obj = obj;
+       return 0;
+
+unref_cursor:
+       /* NOTE(review): matches the unreference style used above —
+        * confirm struct_mutex locking rules for this call site */
+       drm_gem_object_unreference(obj);
+       return ret;
+}
+
+/* Move the hardware cursor to (x, y); negative coordinates are encoded
+ * as magnitude plus a per-axis sign bit */
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t pos = 0;
+       uint32_t addr;
+
+       if (x < 0) {
+               pos |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+               x = -x;
+       }
+       if (y < 0) {
+               pos |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+               y = -y;
+       }
+       pos |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+       pos |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+       addr = psb_intel_crtc->cursor_addr;
+
+       /* Re-write the base register too: position only latches on a
+        * base write on this hardware */
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, pos);
+               REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+               gma_power_end(dev);
+       }
+       return 0;
+}
+
+/* Update the software gamma LUT from 16-bit per-channel values and
+ * push it to the hardware */
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int end = start + size;
+       int i;
+
+       /* The LUT has 256 entries; clamp the update window */
+       if (end > 256)
+               end = 256;
+
+       /* Keep only the 8 most significant bits of each component */
+       for (i = start; i < end; i++) {
+               psb_intel_crtc->lut_r[i] = red[i] >> 8;
+               psb_intel_crtc->lut_g[i] = green[i] >> 8;
+               psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+       }
+
+       cdv_intel_crtc_load_lut(crtc);
+}
+
+/* Modeset entry point: keep the device awake (runtime-PM forbidden)
+ * while the helper performs the configuration change */
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+       struct drm_device *dev = set->crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* Without runtime PM there is nothing to hold off */
+       if (!dev_priv->rpm_enabled)
+               return drm_crtc_helper_set_config(set);
+
+       pm_runtime_forbid(&dev->pdev->dev);
+       ret = drm_crtc_helper_set_config(set);
+       pm_runtime_allow(&dev->pdev->dev);
+
+       return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips.
+ *  m = 5 * (m1 + 2) + (m2 + 2); vco = refclk * m / (n + 2); dot = vco / p.
+ */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe,
+ * decoded from the live DPLL/FP registers when the device is powered,
+ * or from the software-saved copies otherwise. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+                               struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       u32 dpll;
+       u32 fp;
+       struct cdv_intel_clock_t clock;
+       bool is_lvds;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               /* Device powered: read DPLL and the selected FP register */
+               dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+               else
+                       fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+               is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+               gma_power_end(dev);
+       } else {
+               /* Device asleep: fall back to the saved register copies */
+               dpll = (pipe == 0) ?
+                       dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA0 :
+                               dev_priv->saveFPB0;
+               else
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA1 :
+                               dev_priv->saveFPB1;
+
+               is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+       }
+
+       /* Extract the divider fields from the FP register */
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+       if (is_lvds) {
+               /* LVDS uses a different P1 field encoding (bit index) */
+               clock.p1 =
+                   ffs((dpll &
+                        DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                       DPLL_FPA01_P1_POST_DIV_SHIFT);
+               if (clock.p1 == 0) {
+                       clock.p1 = 4;
+                       dev_err(dev->dev, "PLL %d\n", dpll);
+               }
+               clock.p2 = 14;
+
+               if ((dpll & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       /* XXX: might not be 66MHz */
+                       i8xx_clock(66000, &clock);
+               } else
+                       i8xx_clock(48000, &clock);
+       } else {
+               if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                       clock.p1 = 2;
+               else {
+                       clock.p1 =
+                           ((dpll &
+                             DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+               }
+               if (dpll & PLL_P2_DIVIDE_BY_4)
+                       clock.p2 = 4;
+               else
+                       clock.p2 = 2;
+
+               i8xx_clock(48000, &clock);
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config connector
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe as a freshly
+ *  allocated drm_display_mode (caller frees), or NULL on allocation
+ *  failure. Timing registers pack (active - 1) in the low 16 bits and
+ *  (total/end - 1) in the high 16 bits. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot;
+       int hsync;
+       int vtot;
+       int vsync;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               /* Device powered: read the live timing registers */
+               htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+               hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+               vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+               vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+               gma_power_end(dev);
+       } else {
+               /* Device asleep: use the software-saved copies */
+               htot = (pipe == 0) ?
+                       dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+               hsync = (pipe == 0) ?
+                       dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+               vtot = (pipe == 0) ?
+                       dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+               vsync = (pipe == 0) ?
+                       dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+       }
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+/* Tear down a crtc: free the saved register state, unregister the core
+ * object, then free the wrapper */
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *gma_crtc = to_psb_intel_crtc(crtc);
+
+       kfree(gma_crtc->crtc_state);
+       drm_crtc_cleanup(crtc);
+       kfree(gma_crtc);
+}
+
+/* CRTC helper vtable: modeset plumbing for CDV pipes */
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+       .dpms = cdv_intel_crtc_dpms,
+       .mode_fixup = cdv_intel_crtc_mode_fixup,
+       .mode_set = cdv_intel_crtc_mode_set,
+       .mode_set_base = cdv_intel_pipe_set_base,
+       .prepare = cdv_intel_crtc_prepare,
+       .commit = cdv_intel_crtc_commit,
+};
+
+/* CRTC core vtable: save/restore, cursor, gamma and config entry points */
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+       .save = cdv_intel_crtc_save,
+       .restore = cdv_intel_crtc_restore,
+       .cursor_set = cdv_intel_crtc_cursor_set,
+       .cursor_move = cdv_intel_crtc_cursor_move,
+       .gamma_set = cdv_intel_crtc_gamma_set,
+       .set_config = cdv_crtc_set_config,
+       .destroy = cdv_intel_crtc_destroy,
+};
+
+/*
+ * Zero the cursor control and base registers of the given pipe.
+ * This is a workaround for a h/w defect on oaktrail.
+ */
+void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+       uint32_t control;
+       uint32_t base;
+
+       if (pipe == 0) {
+               control = CURACNTR;
+               base = CURABASE;
+       } else if (pipe == 1) {
+               control = CURBCNTR;
+               base = CURBBASE;
+       } else if (pipe == 2) {
+               control = CURCCNTR;
+               base = CURCBASE;
+       } else {
+               /* Unknown pipe: nothing to do */
+               return;
+       }
+
+       REG_WRITE(control, 0);
+       REG_WRITE(base, 0);
+}
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644 (file)
index 0000000..50d7cfb
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ *     We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE             (1 << 7)
+#define HDMI_AUDIO_ENABLE              (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH         (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH         (1 << 3)
+/* hdmi-b control bits */
+#define        HDMIB_PIPE_B_SELECT             (1 << 30)
+
+
+/* Per-encoder HDMI private state */
+struct mid_intel_hdmi_priv {
+       u32 hdmi_reg;           /* port control register (SDVOB/SDVOC) */
+       u32 save_HDMIB;         /* saved port register for suspend/resume */
+       bool has_hdmi_sink;     /* EDID reported an HDMI (vs DVI) sink */
+       bool has_hdmi_audio;    /* EDID reported audio support */
+       /* Should set this when detect hotplug */
+       bool hdmi_device_connected;
+       struct mdfld_hdmi_i2c *i2c_bus;
+       struct i2c_adapter *hdmi_i2c_adapter;   /* for control functions */
+       struct drm_device *dev;
+};
+
+/* Program the HDMI port register for the adjusted mode: sync polarity,
+ * pipe routing and audio bits */
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+       u32 port_val = (2 << 10);
+
+       /* Propagate sync polarities from the adjusted mode */
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               port_val |= HDMI_VSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               port_val |= HDMI_HSYNC_ACTIVE_HIGH;
+
+       /* Route the port to pipe B when driven by crtc 1 */
+       if (intel_crtc->pipe == 1)
+               port_val |= HDMIB_PIPE_B_SELECT;
+
+       if (hdmi_priv->has_hdmi_audio)
+               port_val |= HDMI_AUDIO_ENABLE | HDMI_NULL_PACKETS_DURING_VSYNC;
+
+       REG_WRITE(hdmi_priv->hdmi_reg, port_val);
+       REG_READ(hdmi_priv->hdmi_reg);  /* posting read */
+}
+
+/* No mode fixups are needed for HDMI; accept every mode unchanged */
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/* Gate the HDMI port enable bit according to the requested DPMS state */
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       u32 port_val = REG_READ(hdmi_priv->hdmi_reg);
+
+       if (mode == DRM_MODE_DPMS_ON)
+               port_val |= HDMIB_PORT_EN;
+       else
+               port_val &= ~HDMIB_PORT_EN;
+
+       REG_WRITE(hdmi_priv->hdmi_reg, port_val);
+       REG_READ(hdmi_priv->hdmi_reg);  /* posting read */
+}
+
+/* Capture the HDMI port register for later restore */
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct psb_intel_encoder *encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct mid_intel_hdmi_priv *priv = encoder->dev_priv;
+
+       priv->save_HDMIB = REG_READ(priv->hdmi_reg);
+}
+
+/* Write back the HDMI port register saved by cdv_hdmi_save() */
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct psb_intel_encoder *encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct mid_intel_hdmi_priv *priv = encoder->dev_priv;
+
+       REG_WRITE(priv->hdmi_reg, priv->save_HDMIB);
+       REG_READ(priv->hdmi_reg);       /* posting read */
+}
+
+/* Probe the DDC bus for an EDID: a digital-input EDID means connected.
+ * Also refreshes the cached HDMI-sink and audio capability flags. */
+static enum drm_connector_status cdv_hdmi_detect(
+                               struct drm_connector *connector, bool force)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_connector *psb_intel_connector =
+                                       to_psb_intel_connector(connector);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       struct edid *edid = NULL;
+       enum drm_connector_status status = connector_status_disconnected;
+
+       edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+       /* Reset capabilities; they are only valid for a connected sink */
+       hdmi_priv->has_hdmi_sink = false;
+       hdmi_priv->has_hdmi_audio = false;
+       if (edid) {
+               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+                       status = connector_status_connected;
+                       hdmi_priv->has_hdmi_sink =
+                                               drm_detect_hdmi_monitor(edid);
+                       hdmi_priv->has_hdmi_audio =
+                                               drm_detect_monitor_audio(edid);
+               }
+
+               /* Clear the stale raw_edid pointer before freeing the EDID */
+               psb_intel_connector->base.display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+       return status;
+}
+
+/* Handle connector property changes. Only the "scaling mode" property is
+ * acted on: when the value actually changes, either re-run a full modeset
+ * (when centring is involved) or just re-program the encoder. Returns 0
+ * on success or no-op, -1 on any failure. */
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!strcmp(property->name, "scaling mode") && encoder) {
+               struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+               bool centre;
+               uint64_t curValue;
+
+               if (!crtc)
+                       return -1;
+
+               /* Validate the requested scaling mode */
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+                       break;
+               case DRM_MODE_SCALE_NO_SCALE:
+                       break;
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       return -1;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                       property, &curValue))
+                       return -1;
+
+               /* No change - nothing to re-program */
+               if (curValue == value)
+                       return 0;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property, value))
+                       return -1;
+
+               /* Switching into or out of no-scale needs a full modeset;
+                * other transitions only need the encoder re-programmed */
+               centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+                       (value == DRM_MODE_SCALE_NO_SCALE);
+
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (centre) {
+                               if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+                                           encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+                                       return -1;
+                       } else {
+                               struct drm_encoder_helper_funcs *helpers
+                                                   = encoder->helper_private;
+                               helpers->mode_set(encoder, &crtc->saved_mode,
+                                            &crtc->saved_adjusted_mode);
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct edid *edid;
+       int count = 0;
+
+       edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+       if (!edid)
+               return 0;
+
+       /* Publish the EDID on the connector and add its modes */
+       drm_mode_connector_update_edid_property(connector, edid);
+       count = drm_add_edid_modes(connector, edid);
+       kfree(edid);
+
+       return count;
+}
+
+/* Validate a mode against CDV HDMI limits: pixel clock range, no
+ * doublescan/interlace, and a framebuffer-size cap of 1680x1050. */
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+       /* Fix: a too-slow clock is MODE_CLOCK_LOW, not MODE_CLOCK_HIGH */
+       if (mode->clock < 20000)
+               return MODE_CLOCK_LOW;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       /*
+        * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
+        * will go beyond the stolen memory size allocated to the framebuffer
+        */
+       if (mode->hdisplay > 1680)
+               return MODE_PANEL;
+       if (mode->vdisplay > 1050)
+               return MODE_PANEL;
+       return MODE_OK;
+}
+
+/* Tear down an HDMI connector: release the DDC bus, unregister from
+ * sysfs and the DRM core, then free the wrapper */
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       if (encoder->i2c_bus)
+               psb_intel_i2c_destroy(encoder->i2c_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+/* Encoder helper vtable for the HDMI port */
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+       .dpms = cdv_hdmi_dpms,
+       .mode_fixup = cdv_hdmi_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = cdv_hdmi_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+/* Connector helper vtable: EDID probing and mode validation */
+static const struct drm_connector_helper_funcs
+                                       cdv_hdmi_connector_helper_funcs = {
+       .get_modes = cdv_hdmi_get_modes,
+       .mode_valid = cdv_hdmi_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+/* Connector core vtable: detection, save/restore and properties */
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = cdv_hdmi_save,
+       .restore = cdv_hdmi_restore,
+       .detect = cdv_hdmi_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = cdv_hdmi_set_property,
+       .destroy = cdv_hdmi_destroy,
+};
+
+/*
+ * Register an HDMI connector/encoder pair for the port behind @reg
+ * (SDVOB or SDVOC) and attach its DDC i2c bus.
+ *
+ * Fixes over the previous version: hdmi_priv was leaked on the
+ * failed_ddc error path, and an unreachable break followed a goto.
+ */
+void cdv_hdmi_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev, int reg)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct mid_intel_hdmi_priv *hdmi_priv;
+       int ddc_bus;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+                                   GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+                                     GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto err_connector;
+
+       hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+       if (!hdmi_priv)
+               goto err_priv;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &cdv_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DVID);
+
+       /* NOTE(review): reuses the LVDS encoder funcs table for HDMI -
+        * confirm this is intentional and give HDMI its own table */
+       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+       hdmi_priv->hdmi_reg = reg;
+       hdmi_priv->has_hdmi_sink = false;
+       psb_intel_encoder->dev_priv = hdmi_priv;
+
+       drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &cdv_hdmi_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.scaling_mode_property,
+                                     DRM_MODE_SCALE_FULLSCREEN);
+
+       /* Pick the DDC GPIO pair that matches the port register */
+       switch (reg) {
+       case SDVOB:
+               ddc_bus = GPIOE;
+               break;
+       case SDVOC:
+               ddc_bus = GPIOD;
+               break;
+       default:
+               DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+               goto failed_ddc;
+       }
+
+       psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+                               ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+       if (!psb_intel_encoder->i2c_bus) {
+               dev_err(dev->dev, "No ddc adapter available!\n");
+               goto failed_ddc;
+       }
+
+       hdmi_priv->hdmi_i2c_adapter =
+                               &(psb_intel_encoder->i2c_bus->adapter);
+       hdmi_priv->dev = dev;
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_ddc:
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+       kfree(hdmi_priv);       /* was leaked on this path before */
+err_priv:
+       kfree(psb_intel_connector);
+err_connector:
+       kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644 (file)
index 0000000..50e744b
--- /dev/null
@@ -0,0 +1,732 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE   0x01
+#define BLC_PWM_TYPT   0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ       (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR   (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
struct cdv_intel_lvds_priv {
	/*
	 * Saved LVDS output register state, shadowed here so it can be
	 * restored across power transitions.
	 */
	uint32_t savePP_ON;		/* shadow of PP_ON_DELAYS */
	uint32_t savePP_OFF;		/* shadow of PP_OFF_DELAYS */
	uint32_t saveLVDS;		/* shadow of LVDS port control */
	uint32_t savePP_CONTROL;	/* shadow of PP_CONTROL */
	uint32_t savePP_CYCLE;		/* shadow of PP_CYCLE */
	uint32_t savePFIT_CONTROL;	/* shadow of panel fitter control */
	uint32_t savePFIT_PGM_RATIOS;	/* shadow of panel fitter ratios */
	uint32_t saveBLC_PWM_CTL;	/* shadow of backlight PWM control */
};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 retval;
+
+       if (gma_power_begin(dev, false)) {
+               retval = ((REG_READ(BLC_PWM_CTL) &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+               gma_power_end(dev);
+       } else
+               retval = ((dev_priv->saveBLC_PWM_CTL &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+       return retval;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+                                       unsigned int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+       u8 out_buf[2];
+       unsigned int blc_i2c_brightness;
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = lvds_i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
+       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+                            BRIGHTNESS_MASK /
+                            BRIGHTNESS_MAX_LEVEL);
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+       out_buf[1] = (u8)blc_i2c_brightness;
+
+       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+               return 0;
+
+       DRM_ERROR("I2C transfer error\n");
+       return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       u32 max_pwm_blc;
+       u32 blc_pwm_duty_cycle;
+
+       max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+       /*BLC_PWM_CTL Should be initiated while backlight device init*/
+       BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+       REG_WRITE(BLC_PWM_CTL,
+                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                 (blc_pwm_duty_cycle));
+
+       return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->lvds_bl) {
+               DRM_ERROR("NO LVDS Backlight Info\n");
+               return;
+       }
+
+       if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+               cdv_lvds_i2c_set_brightness(dev, level);
+       else
+               cdv_lvds_pwm_set_brightness(dev, level);
+}
+
+/**
+ * Sets the backlight level.
+ *
+ * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 blc_pwm_ctl;
+
+       if (gma_power_begin(dev, false)) {
+               blc_pwm_ctl =
+                       REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+               REG_WRITE(BLC_PWM_CTL,
+                               (blc_pwm_ctl |
+                               (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+               gma_power_end(dev);
+       } else {
+               blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+                               ~BACKLIGHT_DUTY_CYCLE_MASK;
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+       }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+                                    struct drm_encoder *encoder, bool on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pp_status;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               cdv_intel_lvds_set_backlight(dev,
+                               dev_priv->mode_dev.backlight_duty_cycle);
+       } else {
+               cdv_intel_lvds_set_backlight(dev, 0);
+
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+       gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       if (mode == DRM_MODE_DPMS_ON)
+               cdv_intel_lvds_set_power(dev, encoder, true);
+       else
+               cdv_intel_lvds_set_power(dev, encoder, false);
+       /* XXX: We never power down the LVDS pairs. */
+}
+
/* Connector .save hook: intentionally a no-op on CDV. */
static void cdv_intel_lvds_save(struct drm_connector *connector)
{
}
+
/* Connector .restore hook: intentionally a no-op on CDV. */
static void cdv_intel_lvds_restore(struct drm_connector *connector)
{
}
+
+int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+                             struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *fixed_mode =
+                                       dev_priv->mode_dev.panel_fixed_mode;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+       return MODE_OK;
+}
+
+bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct drm_encoder *tmp_encoder;
+       struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+       /* Should never happen!! */
+       list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+                           head) {
+               if (tmp_encoder != encoder
+                   && tmp_encoder->crtc == encoder->crtc) {
+                       printk(KERN_ERR "Can't enable LVDS and another "
+                              "encoder on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+               adjusted_mode->clock = panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode,
+                                     CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+
+       cdv_intel_lvds_set_power(dev, encoder, false);
+
+       gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (mode_dev->backlight_duty_cycle == 0)
+               mode_dev->backlight_duty_cycle =
+                   cdv_intel_lvds_get_max_backlight(dev);
+
+       cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pfit_control;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+
+       /*
+        * Enable automatic panel scaling so that non-native modes fill the
+        * screen.  Should be enabled before the pipe is enabled, according to
+        * register description and PRM.
+        */
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+                               HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                               HORIZ_INTERP_BILINEAR);
+       else
+               pfit_control = 0;
+
+       if (dev_priv->lvds_dither)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
/**
 * Detect the LVDS connection.
 *
 * This always returns CONNECTOR_STATUS_CONNECTED (@force is ignored).
 * This connector should only have
 * been set up if the LVDS was actually connected anyway.
 */
static enum drm_connector_status cdv_intel_lvds_detect(
				struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       int ret;
+
+       ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+       if (ret)
+               return ret;
+
+       /* Didn't get an EDID, so
+        * Set wide sync ranges so we get all modes
+        * handed to valid_mode for checking
+        */
+       connector->display_info.min_vfreq = 0;
+       connector->display_info.max_vfreq = 200;
+       connector->display_info.min_hfreq = 0;
+       connector->display_info.max_hfreq = 200;
+       if (mode_dev->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode =
+                   drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+               drm_mode_probed_add(connector, mode);
+               return 1;
+       }
+
+       return 0;
+}
+
/**
 * cdv_intel_lvds_destroy - unregister and free LVDS structures
 * @connector: connector to free
 *
 * Unregister the DDC bus for this connector then free the driver private
 * structure.  Called via the connector funcs .destroy hook.
 */
void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
	struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(connector);

	if (psb_intel_encoder->i2c_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
+
+int cdv_intel_lvds_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!strcmp(property->name, "scaling mode") && encoder) {
+               struct psb_intel_crtc *crtc =
+                                       to_psb_intel_crtc(encoder->crtc);
+               uint64_t curValue;
+
+               if (!crtc)
+                       return -1;
+
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+                       break;
+               case DRM_MODE_SCALE_NO_SCALE:
+                       break;
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       return -1;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                    property,
+                                                    &curValue))
+                       return -1;
+
+               if (curValue == value)
+                       return 0;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       return -1;
+
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (!drm_crtc_helper_set_mode(encoder->crtc,
+                                                     &crtc->saved_mode,
+                                                     encoder->crtc->x,
+                                                     encoder->crtc->y,
+                                                     encoder->crtc->fb))
+                               return -1;
+               }
+       } else if (!strcmp(property->name, "backlight") && encoder) {
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       return -1;
+               else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+                       struct drm_psb_private *dev_priv =
+                                               encoder->dev->dev_private;
+                       struct backlight_device *bd =
+                                               dev_priv->backlight_device;
+                       bd->props.brightness = value;
+                       backlight_update_status(bd);
+#endif
+               }
+       } else if (!strcmp(property->name, "DPMS") && encoder) {
+               struct drm_encoder_helper_funcs *helpers =
+                                       encoder->helper_private;
+               helpers->dpms(encoder, value);
+       }
+       return 0;
+}
+
/* Encoder helper vtable used by the DRM CRTC helpers during modeset */
static const struct drm_encoder_helper_funcs
					cdv_intel_lvds_helper_funcs = {
	.dpms = cdv_intel_lvds_encoder_dpms,
	.mode_fixup = cdv_intel_lvds_mode_fixup,
	.prepare = cdv_intel_lvds_prepare,
	.mode_set = cdv_intel_lvds_mode_set,
	.commit = cdv_intel_lvds_commit,
};
+
/* Connector helper vtable: mode probing and encoder selection */
static const struct drm_connector_helper_funcs
				cdv_intel_lvds_connector_helper_funcs = {
	.get_modes = cdv_intel_lvds_get_modes,
	.mode_valid = cdv_intel_lvds_mode_valid,
	.best_encoder = psb_intel_best_encoder,
};
+
/* Connector vtable; DPMS and probing go through the generic DRM helpers */
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = cdv_intel_lvds_save,
	.restore = cdv_intel_lvds_restore,
	.detect = cdv_intel_lvds_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = cdv_intel_lvds_set_property,
	.destroy = cdv_intel_lvds_destroy,
};
+
+
/* Encoder .destroy hook: release the DRM encoder bookkeeping */
static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}
+
/* Encoder vtable for the LVDS output */
const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
	.destroy = cdv_intel_lvds_enc_destroy,
};
+
/**
 * cdv_intel_lvds_init - setup LVDS connectors on this device
 * @dev: drm device
 * @mode_dev: per-device mode handling state
 *
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).  On any failure the
 * partially constructed objects are torn down and the function returns
 * silently (no error code).
 */
void cdv_intel_lvds_init(struct drm_device *dev,
		     struct psb_intel_mode_device *mode_dev)
{
	struct psb_intel_encoder *psb_intel_encoder;
	struct psb_intel_connector *psb_intel_connector;
	struct cdv_intel_lvds_priv *lvds_priv;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_display_mode *scan;
	struct drm_crtc *crtc;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 lvds;
	int pipe;

	/* Allocate encoder, connector and driver private state up front */
	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
				    GFP_KERNEL);
	if (!psb_intel_encoder)
		return;

	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
				      GFP_KERNEL);
	if (!psb_intel_connector)
		goto failed_connector;

	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
	if (!lvds_priv)
		goto failed_lvds_priv;

	psb_intel_encoder->dev_priv = lvds_priv;

	connector = &psb_intel_connector->base;
	encoder = &psb_intel_encoder->base;


	drm_connector_init(dev, connector,
			   &cdv_intel_lvds_connector_funcs,
			   DRM_MODE_CONNECTOR_LVDS);

	drm_encoder_init(dev, encoder,
			 &cdv_intel_lvds_enc_funcs,
			 DRM_MODE_ENCODER_LVDS);


	psb_intel_connector_attach_encoder(psb_intel_connector,
					   psb_intel_encoder);
	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;

	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_intel_lvds_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	/* Attach connector properties */
	drm_connector_attach_property(connector,
				      dev->mode_config.scaling_mode_property,
				      DRM_MODE_SCALE_FULLSCREEN);
	drm_connector_attach_property(connector,
				      dev_priv->backlight_property,
				      BRIGHTNESS_MAX_LEVEL);

	/*
	 * Set up the backlight-controller I2C bus
	 * FIXME: destroy i2c_bus on exit
	 */
	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
							 GPIOB,
							 "LVDSBLC_B");
	if (!psb_intel_encoder->i2c_bus) {
		dev_printk(KERN_ERR,
			&dev->pdev->dev, "I2C bus registration failed.\n");
		goto failed_blc_i2c;
	}
	psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
	dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;

	/*
	 * LVDS discovery:
	 * 1) check for EDID on DDC
	 * 2) check for VBT data
	 * 3) check to see if LVDS is already on
	 *    if none of the above, no panel
	 * 4) make sure lid is open
	 *    if closed, act like it's not there for now
	 */

	/* Set up the DDC bus. */
	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
							 GPIOC,
							 "LVDSDDC_C");
	if (!psb_intel_encoder->ddc_bus) {
		dev_printk(KERN_ERR, &dev->pdev->dev,
			   "DDC bus registration " "failed.\n");
		goto failed_ddc;
	}

	/*
	 * Attempt to get the fixed panel mode from DDC.  Assume that the
	 * preferred mode is the right one.
	 */
	psb_intel_ddc_get_modes(connector,
				&psb_intel_encoder->ddc_bus->adapter);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
			mode_dev->panel_fixed_mode =
			    drm_mode_duplicate(dev, scan);
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* Failed to get EDID, what about VBT? do we need this?*/
	if (dev_priv->lfp_lvds_vbt_mode) {
		mode_dev->panel_fixed_mode =
			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
				DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}
	/*
	 * If we didn't get EDID, try checking if the panel is already turned
	 * on.  If so, assume that whatever is currently programmed is the
	 * correct mode.
	 */
	lvds = REG_READ(LVDS);
	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);

	if (crtc && (lvds & LVDS_PORT_EN)) {
		mode_dev->panel_fixed_mode =
		    cdv_intel_crtc_mode_get(dev, crtc);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
			    DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}

	/* If we still don't have a mode after all that, give up. */
	if (!mode_dev->panel_fixed_mode) {
		DRM_DEBUG
			("Found no modes on the lvds, ignoring the LVDS\n");
		goto failed_find;
	}

out:
	drm_sysfs_connector_add(connector);
	return;

	/* Error unwind: release resources in reverse order of acquisition */
failed_find:
	printk(KERN_ERR "Failed find\n");
	if (psb_intel_encoder->ddc_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
failed_ddc:
	printk(KERN_ERR "Failed DDC\n");
	if (psb_intel_encoder->i2c_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
failed_blc_i2c:
	printk(KERN_ERR "Failed BLC\n");
	drm_encoder_cleanup(encoder);
	drm_connector_cleanup(connector);
	kfree(lvds_priv);
failed_lvds_priv:
	kfree(psb_intel_connector);
failed_connector:
	kfree(psb_intel_encoder);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644 (file)
index 0000000..791c0ef
--- /dev/null
@@ -0,0 +1,831 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                             struct drm_file *file_priv,
+                                             unsigned int *handle);
+
+/* Framebuffer object operations: teardown and userspace handle export */
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+       .destroy = psb_user_framebuffer_destroy,
+       .create_handle = psb_user_framebuffer_create_handle,
+};
+
+/* Scale a 16-bit colour component down to a field _width bits wide,
+   rounding to nearest rather than truncating */
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+/*
+ * psbfb_setcolreg - set one palette/pseudo-palette entry for the console
+ *
+ * Converts the 16-bit fb colour components to the widths used by the
+ * current video mode and stores the packed value in the software
+ * pseudo-palette (entries 0-15 only, as the console requires).
+ * Returns 0 on success, 1 for registers fbcon should not use.
+ */
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp,
+                          struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+       uint32_t v;
+
+       /* NOTE(review): -ENOMEM is an odd errno for "no framebuffer bound";
+          callers appear to only test for non-zero, so harmless in practice */
+       if (!fb)
+               return -ENOMEM;
+
+       if (regno > 255)
+               return 1;
+
+       /* Scale each 16-bit component to the per-channel field width */
+       red = CMAP_TOHW(red, info->var.red.length);
+       blue = CMAP_TOHW(blue, info->var.blue.length);
+       green = CMAP_TOHW(green, info->var.green.length);
+       transp = CMAP_TOHW(transp, info->var.transp.length);
+
+       v = (red << info->var.red.offset) |
+           (green << info->var.green.offset) |
+           (blue << info->var.blue.offset) |
+           (transp << info->var.transp.offset);
+
+       if (regno < 16) {
+               /* The 16bpp and 24/32bpp arms are currently identical; kept
+                  separate in case a 16bpp-specific packing is needed later */
+               switch (fb->bits_per_pixel) {
+               case 16:
+                       ((uint32_t *) info->pseudo_palette)[regno] = v;
+                       break;
+               case 24:
+               case 32:
+                       ((uint32_t *) info->pseudo_palette)[regno] = v;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * psbfb_pan - pan the console framebuffer by rolling the GTT mapping
+ * @var: requested pan state (only yoffset is consumed)
+ * @info: framebuffer being panned
+ *
+ * Panning is implemented by re-pointing GTT pages rather than copying
+ * pixel data, so it only acts once the fb is mapped into the GTT.
+ */
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+
+       /*
+        *      We have to poke our nose in here. The core fb code assumes
+        *      panning is part of the hardware that can be invoked before
+        *      the actual fb is mapped. In our case that isn't quite true.
+        */
+       if (psbfb->gtt->npage) {
+               /* GTT roll shifts in 4K pages, we need to shift the right
+                  number of pages: line_length >> 12 is GTT pages per
+                  scanline, multiplied by the requested line offset */
+               int pages = info->fix.line_length >> 12;
+               psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+       }
+        return 0;
+}
+
+/*
+ * psbfb_suspend - suspend all fbdev framebuffers
+ * @dev: our DRM device
+ *
+ * Mark every console framebuffer suspended and blank it so the fb layer
+ * stops touching the hardware across a suspend cycle.
+ *
+ * Bug fix: psbfb was previously computed once from fb while fb was still
+ * NULL, before the list walk, so the loop dereferenced a NULL-derived
+ * pointer instead of the per-entry framebuffer. Derive it per iteration.
+ */
+void psbfb_suspend(struct drm_device *dev)
+{
+       struct drm_framebuffer *fb;
+
+       console_lock();
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
+               struct fb_info *info = psbfb->fbdev;
+               fb_set_suspend(info, 1);
+               drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       console_unlock();
+}
+
+/*
+ * psbfb_resume - resume all fbdev framebuffers
+ * @dev: our DRM device
+ *
+ * Unblank and un-suspend every console framebuffer, then let the helper
+ * turn off anything that is no longer in use.
+ *
+ * Bug fix: as in psbfb_suspend(), psbfb was previously derived from a
+ * NULL fb before the loop; compute it from each list entry instead.
+ */
+void psbfb_resume(struct drm_device *dev)
+{
+       struct drm_framebuffer *fb;
+
+       console_lock();
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
+               struct fb_info *info = psbfb->fbdev;
+               fb_set_suspend(info, 0);
+               drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       console_unlock();
+       drm_helper_disable_unused_functions(dev);
+}
+
+/*
+ * psbfb_vm_fault - fault handler for a user mapping of the framebuffer
+ *
+ * On the first fault we pre-fill the whole VMA page by page straight
+ * from the stolen memory physical range, then return NOPAGE so the core
+ * does not install a page of its own.
+ */
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct psb_framebuffer *psbfb = vma->vm_private_data;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int page_num;
+       int i;
+       unsigned long address;
+       int ret;
+       unsigned long pfn;
+       /* FIXME: assumes fb at stolen base which may not be true */
+       unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+
+       page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       address = (unsigned long)vmf->virtual_address;
+
+       /* Framebuffer memory must not be cached */
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       for (i = 0; i < page_num; i++) {
+               pfn = (phys_addr >> PAGE_SHIFT);
+
+               ret = vm_insert_mixed(vma, address, pfn);
+               /* -EBUSY (already present) or any failure after the first
+                  page just ends the pre-fill; pages done so far stand */
+               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+                       break;
+               else if (unlikely(ret != 0)) {
+                       /* First page failed: the fault itself fails */
+                       ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                       return ret;
+               }
+               address += PAGE_SIZE;
+               phys_addr += PAGE_SIZE;
+       }
+       return VM_FAULT_NOPAGE;
+}
+
+/* No per-mapping bookkeeping is needed, so open/close are empty */
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+/* Never written at runtime; const lets it live in rodata */
+static const struct vm_operations_struct psbfb_vm_ops = {
+       .fault  = psbfb_vm_fault,
+       .open   = psbfb_vm_open,
+       .close  = psbfb_vm_close
+};
+
+/*
+ * psbfb_mmap - map the framebuffer into user space
+ *
+ * Only offset-zero mappings are accepted; the fault handler then fills
+ * the VMA from stolen memory. The old second test on vm_pgoff was
+ * unreachable once vm_pgoff != 0 had been rejected, so it is removed.
+ */
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+       /* The mapping must start at the beginning of the framebuffer */
+       if (vma->vm_pgoff != 0)
+               return -EINVAL;
+
+       /* NOTE(review): addr_space is only recorded here, not consumed in
+          this file — presumably used elsewhere; verify before changing */
+       if (!psbfb->addr_space)
+               psbfb->addr_space = vma->vm_file->f_mapping;
+       /*
+        * If this is a GEM object then info->screen_base is the virtual
+        * kernel remapping of the object. FIXME: Review if this is
+        * suitable for our mmap work
+        */
+       vma->vm_ops = &psbfb_vm_ops;
+       vma->vm_private_data = (void *)psbfb;
+       vma->vm_flags |= VM_RESERVED | VM_IO |
+                                       VM_MIXEDMAP | VM_DONTEXPAND;
+       return 0;
+}
+
+/* No driver-private fb ioctls are implemented yet */
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+                                               unsigned long arg)
+{
+       return -ENOTTY;
+}
+
+/* Ops for the 2D-engine accelerated path: copyarea and sync go through
+   the driver acceleration code (psbfb_copyarea/psbfb_sync) */
+static struct fb_ops psbfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = psbfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_mmap = psbfb_mmap,
+       .fb_sync = psbfb_sync,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+/* Ops when scrolling is done by rolling the GTT mapping: software
+   rendering (cfb_*) plus pan_display wired to the GTT roll */
+static struct fb_ops psbfb_roll_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_pan_display = psbfb_pan,
+       .fb_mmap = psbfb_mmap,
+       .fb_sync = psbfb_sync,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+/* Entirely unaccelerated fallback: plain cfb_* software rendering */
+static struct fb_ops psbfb_unaccel_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_mmap = psbfb_mmap,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ *     psb_framebuffer_init    -       initialize a framebuffer
+ *     @dev: our DRM device
+ *     @fb: framebuffer to set up
+ *     @mode_cmd: mode description
+ *     @gt: backing object
+ *
+ *     Configure and fill in the boilerplate for our frame buffer. Return
+ *     0 on success or an error code if we fail.
+ *
+ *     The pitch must be 64 byte aligned and only 8/16/24/32 bits per
+ *     pixel are accepted; anything else is rejected with -EINVAL.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+                                       struct psb_framebuffer *fb,
+                                       struct drm_mode_fb_cmd2 *mode_cmd,
+                                       struct gtt_range *gt)
+{
+       u32 bpp, depth;
+       int ret;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+       /* Display engine requires 64 byte aligned pitches */
+       if (mode_cmd->pitches[0] & 63)
+               return -EINVAL;
+       switch (bpp) {
+       case 8:
+       case 16:
+       case 24:
+       case 32:
+               break;
+       default:
+               return -EINVAL;
+       }
+       ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+       if (ret) {
+               dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+               return ret;
+       }
+       drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+       fb->gtt = gt;
+       return 0;
+}
+
+/**
+ *     psb_framebuffer_create  -       create a framebuffer backed by gt
+ *     @dev: our DRM device
+ *     @mode_cmd: the description of the requested mode
+ *     @gt: the backing object
+ *
+ *     Create a framebuffer object backed by the gt, and fill in the
+ *     boilerplate required. Returns the new framebuffer or an
+ *     ERR_PTR() encoded error.
+ *
+ *     TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+                       (struct drm_device *dev,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct gtt_range *gt)
+{
+       struct psb_framebuffer *fb;
+       int ret;
+
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+       if (ret) {
+               kfree(fb);
+               return ERR_PTR(ret);
+       }
+       return &fb->base;
+}
+
+/**
+ *     psbfb_alloc             -       allocate frame buffer memory
+ *     @dev: the DRM device
+ *     @aligned_size: space needed
+ *
+ *     Allocate the frame buffer. In the usual case we get a GTT range that
+ *     is stolen memory backed and life is simple. If there isn't sufficient
+ *     we fail as we don't have the virtual mapping space to really vmap it
+ *     and the kernel console code can't handle non linear framebuffers.
+ *
+ *     Returns the backing gtt_range, or NULL if stolen memory could not
+ *     provide the space. (An earlier version of this comment documented a
+ *     @force parameter that does not exist.)
+ *
+ *     Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+       struct gtt_range *backing;
+       /* Begin by trying to use stolen memory backing */
+       backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+       if (backing) {
+               if (drm_gem_private_object_init(dev,
+                                       &backing->gem, aligned_size) == 0)
+                       return backing;
+               /* GEM init failed: hand the range back */
+               psb_gtt_free_range(dev, backing);
+       }
+       return NULL;
+}
+
+/**
+ *     psbfb_create            -       create a framebuffer
+ *     @fbdev: the framebuffer device
+ *     @sizes: specification of the layout
+ *
+ *     Create a framebuffer to the specifications provided. Tries, in
+ *     order: a pitch tight enough for 2D acceleration, progressively
+ *     looser pitches for GTT roll scrolling, then a plain 64 byte
+ *     aligned pitch with software rendering only.
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+                               struct drm_fb_helper_surface_size *sizes)
+{
+       struct drm_device *dev = fbdev->psb_fb_helper.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_mode_fb_cmd2 mode_cmd;
+       struct device *device = &dev->pdev->dev;
+       int size;
+       int ret;
+       struct gtt_range *backing;
+       u32 bpp, depth;
+       int gtt_roll = 0;
+       int pitch_lines = 0;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+       bpp = sizes->surface_bpp;
+
+       /* No 24bit packed */
+       if (bpp == 24)
+               bpp = 32;
+
+       do {
+               /*
+                * Acceleration via the GTT requires pitch to be
+                * power of two aligned. Preferably page but less
+                * is ok with some fonts
+                */
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+               depth = sizes->surface_depth;
+
+               size = mode_cmd.pitches[0] * mode_cmd.height;
+               size = ALIGN(size, PAGE_SIZE);
+
+               /* Allocate the fb in the GTT with stolen page backing */
+               backing = psbfb_alloc(dev, size);
+
+               /* Loosen the alignment requirement each time round */
+               if (pitch_lines)
+                       pitch_lines *= 2;
+               else
+                       pitch_lines = 1;
+               gtt_roll++;
+       } while (backing == NULL && pitch_lines <= 16);
+
+       /* The final pitch we accepted if we succeeded */
+       pitch_lines /= 2;
+
+       if (backing == NULL) {
+               /*
+                *      We couldn't get the space we wanted, fall back to the
+                *      display engine requirement instead.  The HW requires
+                *      the pitch to be 64 byte aligned
+                */
+
+               gtt_roll = 0;   /* Don't use GTT accelerated scrolling */
+               pitch_lines = 64;
+
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+               size = mode_cmd.pitches[0] * mode_cmd.height;
+               size = ALIGN(size, PAGE_SIZE);
+
+               /* Allocate the framebuffer in the GTT with stolen page backing */
+               backing = psbfb_alloc(dev, size);
+               if (backing == NULL)
+                       return -ENOMEM;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       info = framebuffer_alloc(0, device);
+       if (!info) {
+               ret = -ENOMEM;
+               goto out_err1;
+       }
+       info->par = fbdev;
+
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+       ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+       if (ret)
+               goto out_unref;
+
+       fb = &psbfb->base;
+       psbfb->fbdev = info;
+
+       fbdev->psb_fb_helper.fb = fb;
+       fbdev->psb_fb_helper.fbdev = info;
+
+       strcpy(info->fix.id, "psbfb");
+
+       /* Pick the rendering ops matching what we managed to allocate */
+       info->flags = FBINFO_DEFAULT;
+       if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+               info->fbops = &psbfb_ops;
+       else if (gtt_roll) {    /* GTT rolling seems best */
+               info->fbops = &psbfb_roll_ops;
+               info->flags |= FBINFO_HWACCEL_YPAN;
+       } else  /* Software */
+               info->fbops = &psbfb_unaccel_ops;
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->fix.smem_start = dev->mode_config.fb_base;
+       info->fix.smem_len = size;
+       info->fix.ywrapstep = gtt_roll;
+       info->fix.ypanstep = 0;
+
+       /* Accessed stolen memory directly */
+       info->screen_base = (char *)dev_priv->vram_addr +
+                                                       backing->offset;
+       info->screen_size = size;
+
+       if (dev_priv->gtt.stolen_size) {
+               info->apertures = alloc_apertures(1);
+               if (!info->apertures) {
+                       ret = -ENOMEM;
+                       goto out_unref;
+               }
+               info->apertures->ranges[0].base = dev->mode_config.fb_base;
+               info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+       }
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+                               sizes->fb_width, sizes->fb_height);
+
+       info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+       info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+       info->pixmap.size = 64 * 1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+
+       dev_info(dev->dev, "allocated %dx%d fb\n",
+                                       psbfb->base.width, psbfb->base.height);
+
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+out_unref:
+       if (backing->stolen)
+               psb_gtt_free_range(dev, backing);
+       else
+               drm_gem_object_unreference(&backing->gem);
+       /*
+        * Bug fix: this used to fall through into out_err1, which freed
+        * "backing" a second time. Unlock and bail out here instead.
+        */
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+out_err1:
+       mutex_unlock(&dev->struct_mutex);
+       psb_gtt_free_range(dev, backing);
+       return ret;
+}
+
+/**
+ *     psb_user_framebuffer_create     -       create framebuffer
+ *     @dev: our DRM device
+ *     @filp: client file
+ *     @cmd: mode request
+ *
+ *     Create a new framebuffer backed by a userspace GEM object.
+ *     Returns the framebuffer or an ERR_PTR() encoded error.
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+                       (struct drm_device *dev, struct drm_file *filp,
+                        struct drm_mode_fb_cmd2 *cmd)
+{
+       struct gtt_range *r;
+       struct drm_gem_object *obj;
+
+       /*
+        *      Find the GEM object and thus the gtt range object that is
+        *      to back this space
+        */
+       obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+       if (obj == NULL)
+               return ERR_PTR(-ENOENT);
+
+       /* NOTE(review): there is no check here that the object is large
+          enough for the requested mode — confirm callers guarantee it */
+       /* Let the core code do all the work */
+       r = container_of(obj, struct gtt_range, gem);
+       return psb_framebuffer_create(dev, cmd, r);
+}
+
+/* Gamma hooks: nothing is stored or programmed here; empty stubs are
+   provided so the fb helper has callbacks to invoke */
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                                       u16 blue, int regno)
+{
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+                                       u16 *green, u16 *blue, int regno)
+{
+}
+
+/*
+ * psbfb_probe - fb helper probe callback
+ *
+ * Build the initial framebuffer if none exists yet. Returns 1 when a
+ * new framebuffer was created, 0 when one was already present, or a
+ * negative error code from the creation path.
+ */
+static int psbfb_probe(struct drm_fb_helper *helper,
+                               struct drm_fb_helper_surface_size *sizes)
+{
+       struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+       int ret;
+
+       if (helper->fb)
+               return 0;       /* Already set up, nothing created */
+
+       ret = psbfb_create(psb_fbdev, sizes);
+       if (ret)
+               return ret;
+       return 1;               /* Tell the helper a new fb exists */
+}
+
+/* Callbacks handed to the DRM fb helper layer */
+struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+       .gamma_set = psbfb_gamma_set,
+       .gamma_get = psbfb_gamma_get,
+       .fb_probe = psbfb_probe,
+};
+
+/*
+ * psb_fbdev_destroy - tear down the fbdev emulation state
+ *
+ * Unregister and release the console framebuffer (if one was created),
+ * shut the fb helper down, clean up our framebuffer object and drop the
+ * reference on the backing GEM object. Always returns 0.
+ */
+int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+       struct fb_info *info;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+       if (fbdev->psb_fb_helper.fbdev) {
+               info = fbdev->psb_fb_helper.fbdev;
+               unregister_framebuffer(info);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+               framebuffer_release(info);
+       }
+       drm_fb_helper_fini(&fbdev->psb_fb_helper);
+       drm_framebuffer_cleanup(&psbfb->base);
+
+       if (psbfb->gtt)
+               drm_gem_object_unreference(&psbfb->gtt->gem);
+       return 0;
+}
+
+/*
+ * psb_fbdev_init - set up the fbdev emulation
+ *
+ * Allocate the fbdev state, initialise the DRM fb helper and create the
+ * initial console configuration. Returns 0 on success or a negative
+ * error code.
+ *
+ * Bug fix: the return value of drm_fb_helper_init() was previously
+ * ignored, leaking fbdev and leaving a stale dev_priv->fbdev pointer
+ * installed on failure.
+ */
+int psb_fbdev_init(struct drm_device *dev)
+{
+       struct psb_fbdev *fbdev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+
+       fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+       if (!fbdev) {
+               dev_err(dev->dev, "no memory\n");
+               return -ENOMEM;
+       }
+
+       dev_priv->fbdev = fbdev;
+       fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+       ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
+                               dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+       if (ret) {
+               dev_priv->fbdev = NULL;
+               kfree(fbdev);
+               return ret;
+       }
+
+       drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+       drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+       return 0;
+}
+
+/*
+ * psb_fbdev_fini - undo psb_fbdev_init
+ *
+ * Destroy and free the fbdev emulation state, if any was created.
+ */
+void psb_fbdev_fini(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+       if (fbdev == NULL)
+               return;         /* Nothing was ever set up */
+
+       psb_fbdev_destroy(dev, fbdev);
+       kfree(fbdev);
+       dev_priv->fbdev = NULL;
+}
+
+/* Output poll callback: forward hotplug changes to the fb helper so the
+   console configuration can be rebuilt */
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+       drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ *     psb_user_framebuffer_create_handle - add handle to a framebuffer
+ *     @fb: framebuffer
+ *     @file_priv: our DRM file
+ *     @handle: returned handle
+ *
+ *     Our framebuffer object is a GTT range which also contains a GEM
+ *     object. We need to turn it into a handle for userspace. GEM will do
+ *     the work for us. Returns 0 or a negative error code.
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                             struct drm_file *file_priv,
+                                             unsigned int *handle)
+{
+       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *r = psbfb->gtt;
+       return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ *     psb_user_framebuffer_destroy    -       destruct user created fb
+ *     @fb: framebuffer
+ *
+ *     User framebuffers are backed by GEM objects so all we have to do is
+ *     clean up a bit and drop the reference, GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *r = psbfb->gtt;
+       struct drm_device *dev = fb->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_fbdev *fbdev = dev_priv->fbdev;
+       struct drm_crtc *crtc;
+       int reset = 0;
+
+       /* Should never get stolen memory for a user fb */
+       WARN_ON(r->stolen);
+
+       /* Check if we are erroneously live: is any CRTC still scanning
+          out of the framebuffer being destroyed? */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               if (crtc->fb == fb)
+                       reset = 1;
+
+       if (reset)
+               /*
+                * Now force a sane response before we permit the DRM CRTC
+                * layer to do stupid things like blank the display. Instead
+                * we reset this framebuffer as if the user had forced a reset.
+                * We must do this before the cleanup so that the DRM layer
+                * doesn't get a chance to stick its oar in where it isn't
+                * wanted.
+                */
+               drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+
+       /* Let DRM do its clean up */
+       drm_framebuffer_cleanup(fb);
+       /*  We are no longer using the resource in GEM */
+       drm_gem_object_unreference_unlocked(&r->gem);
+       kfree(fb);
+}
+
+/* Mode config entry points: userspace fb creation and hotplug handling */
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+       .fb_create = psb_user_framebuffer_create,
+       .output_poll_changed = psbfb_output_poll_changed,
+};
+
+/*
+ * psb_create_backlight_property - register the "backlight" property
+ *
+ * Create (at most once) the 0-100 range DRM property used for backlight
+ * control. Returns 0 on success or if the property already exists,
+ * -ENOMEM if allocation fails.
+ *
+ * Bug fix: drm_property_create() can return NULL; the old code
+ * dereferenced the result unconditionally.
+ */
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_property *backlight;
+
+       if (dev_priv->backlight_property)
+               return 0;
+
+       backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                                       "backlight", 2);
+       if (!backlight)
+               return -ENOMEM;
+
+       backlight->values[0] = 0;       /* minimum */
+       backlight->values[1] = 100;     /* maximum */
+
+       dev_priv->backlight_property = backlight;
+
+       return 0;
+}
+
+/*
+ * psb_setup_outputs - build the output list and routing masks
+ *
+ * Create the shared connector properties, let the per-chip ops create
+ * the encoders and connectors, then derive for each encoder which CRTCs
+ * it may drive and which other output types it can be cloned with.
+ * (Also removes trailing whitespace present in the HDMI case.)
+ */
+static void psb_setup_outputs(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_connector *connector;
+
+       drm_mode_create_scaling_mode_property(dev);
+       psb_create_backlight_property(dev);
+
+       /* Chip specific encoder/connector creation */
+       dev_priv->ops->output_init(dev);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                       psb_intel_attached_encoder(connector);
+               struct drm_encoder *encoder = &psb_intel_encoder->base;
+               int crtc_mask = 0, clone_mask = 0;
+
+               /* valid crtcs */
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       crtc_mask = ((1 << 0) | (1 << 1));
+                       clone_mask = (1 << INTEL_OUTPUT_SDVO);
+                       break;
+               case INTEL_OUTPUT_LVDS:
+                       /* LVDS is pipe A on Moorestown, pipe B otherwise */
+                       if (IS_MRST(dev))
+                               crtc_mask = (1 << 0);
+                       else
+                               crtc_mask = (1 << 1);
+                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       break;
+               case INTEL_OUTPUT_MIPI:
+                       crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_MIPI);
+                       break;
+               case INTEL_OUTPUT_MIPI2:
+                       crtc_mask = (1 << 2);
+                       clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+                       break;
+               case INTEL_OUTPUT_HDMI:
+                       /* HDMI is pipe B on Medfield, pipe A otherwise */
+                       if (IS_MFLD(dev))
+                               crtc_mask = (1 << 1);
+                       else
+                               crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_HDMI);
+                       break;
+               }
+               encoder->possible_crtcs = crtc_mask;
+               encoder->possible_clones =
+                   psb_intel_connector_clones(dev, clone_mask);
+       }
+}
+
+/*
+ * psb_modeset_init - set up KMS mode setting
+ *
+ * Initialise the DRM mode configuration, read the stolen memory base
+ * used for the framebuffer, create the CRTCs and finally the outputs.
+ */
+void psb_modeset_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       int i;
+
+       drm_mode_config_init(dev);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.funcs = (void *) &psb_mode_funcs;
+
+       /* set memory base */
+       /* Oaktrail and Poulsbo should use BAR 2*/
+       pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+                                       &(dev->mode_config.fb_base));
+
+       /* num pipes is 2 for PSB but 1 for Mrst */
+       for (i = 0; i < dev_priv->num_pipe; i++)
+               psb_intel_crtc_init(dev, i, mode_dev);
+
+       dev->mode_config.max_width = 2048;
+       dev->mode_config.max_height = 2048;
+
+       psb_setup_outputs(dev);
+}
+
+/*
+ * psb_modeset_cleanup - undo psb_modeset_init
+ *
+ * Stop connector polling, tear down the fbdev emulation and release all
+ * mode setting objects, under the device structure lock.
+ */
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+       mutex_lock(&dev->struct_mutex);
+
+       drm_kms_helper_poll_fini(dev);
+       psb_fbdev_fini(dev);
+       drm_mode_config_cleanup(dev);
+
+       mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644 (file)
index 0000000..989558a
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+/* Our wrapper around a DRM framebuffer: ties the fbdev console info and
+   the GTT range that backs the pixels to the DRM object */
+struct psb_framebuffer {
+       struct drm_framebuffer base;            /* Embedded DRM framebuffer */
+       struct address_space *addr_space;       /* Recorded at first mmap */
+       struct fb_info *fbdev;                  /* Console fb_info, if any */
+       struct gtt_range *gtt;                  /* Backing storage */
+};
+
+/* State for the fbdev (console) emulation */
+struct psb_fbdev {
+       struct drm_fb_helper psb_fb_helper;
+       struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
+
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644 (file)
index 0000000..9fbb868
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ *  psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ *     -       we need to work out if the MMU is relevant (eg for
+ *             accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+
+/* GEM per-object init hook: objects are created via psb_gtt_alloc_range
+   instead, so initialising one this way is not supported */
+int psb_gem_init_object(struct drm_gem_object *obj)
+{
+       return -EINVAL;
+}
+
+/* Free a GEM object: drop any mmap offset mapping, then release the
+   underlying GTT range */
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+       struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+       drm_gem_object_release_wrap(obj);
+       /* This must occur last as it frees up the memory of the GEM object */
+       psb_gtt_free_range(obj->dev, gtt);
+}
+
+/* Aperture query is not implemented for this hardware */
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+                               struct drm_file *file)
+{
+       return -EINVAL;
+}
+
+/**
+ *     psb_gem_dumb_map_gtt    -       buffer mapping for dumb interface
+ *     @file: our drm client file
+ *     @dev: drm device
+ *     @handle: GEM handle to the object (from dumb_create)
+ *     @offset: returns the fake mmap offset userspace passes to mmap()
+ *
+ *     Do the necessary setup to allow the mapping of the frame buffer
+ *     into user memory. We don't have to do much here at the moment.
+ *
+ *     Returns 0 on success, -ENODEV without GEM support, -ENOENT for a
+ *     bad handle, or the error from gem_create_mmap_offset().
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+                        uint32_t handle, uint64_t *offset)
+{
+       int ret = 0;
+       struct drm_gem_object *obj;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* GEM does all our handle to object mapping */
+       obj = drm_gem_object_lookup(dev, file, handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+       /* What validation is needed here ? */
+
+       /* Make it mmapable */
+       if (!obj->map_list.map) {
+               ret = gem_create_mmap_offset(obj);
+               if (ret)
+                       goto out;
+       }
+       /* GEM should really work out the hash offsets for us */
+       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+out:
+       drm_gem_object_unreference(obj);
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ *     psb_gem_create          -       create a mappable object
+ *     @file: the DRM file of the client
+ *     @dev: our device
+ *     @size: the size requested (rounded up to a page multiple)
+ *     @handlep: returned handle (opaque number)
+ *
+ *     Create a GEM object, fill in the boilerplate and attach a handle to
+ *     it so that userspace can speak about it. This does the core work
+ *     for the various methods that do/will create GEM objects for things
+ *
+ *     Returns 0 on success, -ENOSPC if no GTT space is left, -ENOMEM if
+ *     GEM initialisation fails, or the drm_gem_handle_create() error.
+ */
+static int psb_gem_create(struct drm_file *file,
+       struct drm_device *dev, uint64_t size, uint32_t *handlep)
+{
+       struct gtt_range *r;
+       int ret;
+       u32 handle;
+
+       size = roundup(size, PAGE_SIZE);
+
+       /* Allocate our object - for now a direct gtt range which is not
+          stolen memory backed */
+       r = psb_gtt_alloc_range(dev, size, "gem", 0);
+       if (r == NULL) {
+               /* NOTE(review): size is u64 — %llu would be the matching
+                  specifier in these messages */
+               dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+               return -ENOSPC;
+       }
+       /* Initialize the extra goodies GEM needs to do all the hard work */
+       if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+               psb_gtt_free_range(dev, r);
+               /* GEM doesn't give an error code so use -ENOMEM */
+               dev_err(dev->dev, "GEM init failed for %lld\n", size);
+               return -ENOMEM;
+       }
+       /* Give the object a handle so we can carry it more easily */
+       ret = drm_gem_handle_create(file, &r->gem, &handle);
+       if (ret) {
+               dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+                                                       &r->gem, size);
+               drm_gem_object_release(&r->gem);
+               psb_gtt_free_range(dev, r);
+               return ret;
+       }
+       /* We have the initial and handle reference but need only one now */
+       drm_gem_object_unreference(&r->gem);
+       *handlep = handle;
+       return 0;
+}
+
+/**
+ *     psb_gem_dumb_create     -       create a dumb buffer
+ *     @file: our client file
+ *     @dev: our device
+ *     @args: the requested arguments copied from userspace
+ *
+ *     Allocate a buffer suitable for use for a frame buffer of the
+ *     form described by user space. Give userspace a handle by which
+ *     to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+                       struct drm_mode_create_dumb *args)
+{
+       /* 64-byte aligned pitch; bpp is rounded up to whole bytes */
+       args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+       args->size = args->pitch * args->height;
+       return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+/**
+ *     psb_gem_dumb_destroy    -       destroy a dumb buffer
+ *     @file: client file
+ *     @dev: our DRM device
+ *     @handle: the object handle
+ *
+ *     Destroy a handle that was created via psb_gem_dumb_create, at least
+ *     we hope it was created that way. i915 seems to assume the caller
+ *     does the checking but that might be worth review ! FIXME
+ */
+int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle)
+{
+       /* No special work needed, drop the reference and see what falls out */
+       return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ *     psb_gem_fault           -       pagefault handler for GEM objects
+ *     @vma: the VMA of the GEM object
+ *     @vmf: fault detail
+ *
+ *     Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ *     does most of the work for us including the actual map/unmap calls
+ *     but we need to do the actual page work.
+ *
+ *     This code eventually needs to handle faulting objects in and out
+ *     of the GTT and repacking it when we run out of space. We can put
+ *     that off for now and for our simple uses
+ *
+ *     The VMA was set up by GEM. In doing so it also ensured that the
+ *     vma->vm_private_data points to the GEM object that is backing this
+ *     mapping.
+ *
+ *     Returns a VM_FAULT_* code translated from the pin/insert result.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_gem_object *obj;
+       struct gtt_range *r;
+       int ret;
+       unsigned long pfn;
+       pgoff_t page_offset;
+       struct drm_device *dev;
+       struct drm_psb_private *dev_priv;
+
+       obj = vma->vm_private_data;     /* GEM object */
+       dev = obj->dev;
+       dev_priv = dev->dev_private;
+
+       r = container_of(obj, struct gtt_range, gem);   /* Get the gtt range */
+
+       /* Make sure we don't parallel update on a fault, nor move or remove
+          something from beneath our feet */
+       mutex_lock(&dev->struct_mutex);
+
+       /* For now the mmap pins the object and it stays pinned. As things
+          stand that will do us no harm */
+       if (r->mmapping == 0) {
+               ret = psb_gtt_pin(r);
+               if (ret < 0) {
+                       dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+                       goto fail;
+               }
+               r->mmapping = 1;
+       }
+
+       /* Page relative to the VMA start - we must calculate this ourselves
+          because vmf->pgoff is the fake GEM offset */
+       page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+                               >> PAGE_SHIFT;
+
+       /* CPU view of the page, don't go via the GART for CPU writes */
+       if (r->stolen)
+               pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+       else
+               pfn = page_to_pfn(r->pages[page_offset]);
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+       mutex_unlock(&dev->struct_mutex);
+       /* Translate the errno (from pin or insert) into a fault code */
+       switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+/* Create a GEM object backed by stolen memory and attach a handle to it.
+   Returns 0 with *handle set on success, -ENOMEM on any failure.
+   NOTE(review): when drm_gem_handle_create() fails, the object that
+   drm_gem_private_object_init() set up is freed via psb_gtt_free_range()
+   without a matching drm_gem_object_release() — confirm nothing leaks */
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+                                               int size, u32 *handle)
+{
+       struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+       if (gtt == NULL)
+               return -ENOMEM;
+       if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+               goto free_gtt;
+       if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+               return 0;
+free_gtt:
+       psb_gtt_free_range(dev, gtt);
+       return -ENOMEM;
+}
+
+/*
+ *     GEM interfaces for our specific client
+ */
+
+/* Create ioctl: try stolen-memory backing when the client asks for it,
+   falling back to an ordinary GEM object if stolen space is exhausted */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file)
+{
+       struct drm_psb_gem_create *args = data;
+       int ret;
+       if (args->flags & GMA_GEM_CREATE_STOLEN) {
+               ret = psb_gem_create_stolen(file, dev, args->size,
+                                                       &args->handle);
+               if (ret == 0)
+                       return 0;
+               /* Fall through */
+               args->flags &= ~GMA_GEM_CREATE_STOLEN;
+       }
+       return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+/* Map ioctl: delegate to the driver's dumb_map_offset method */
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file)
+{
+       struct drm_psb_gem_mmap *args = data;
+       return dev->driver->dumb_map_offset(file, dev,
+                                               args->handle, &args->offset);
+}
+
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
new file mode 100644 (file)
index 0000000..daac121
--- /dev/null
@@ -0,0 +1,89 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+/* Release a GEM object, first tearing down any mmap offset mapping that
+   gem_create_mmap_offset() built for it, then calling the core release */
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+       /* Remove the list map if one is present */
+       if (obj->map_list.map) {
+               struct drm_gem_mm *mm = obj->dev->mm_private;
+               struct drm_map_list *list = &obj->map_list;
+               drm_ht_remove_item(&mm->offset_hash, &list->hash);
+               drm_mm_put_block(list->file_offset_node);
+               kfree(list->map);
+               list->map = NULL;
+       }
+       drm_gem_object_release(obj);
+}
+
+/**
+ *     gem_create_mmap_offset          -       invent an mmap offset
+ *     @obj: our object
+ *
+ *     Standard implementation of offset generation for mmap as is
+ *     duplicated in several drivers. This belongs in GEM.
+ *
+ *     Returns 0 on success; -ENOMEM or -ENOSPC on failure, with any
+ *     partially-built state torn down.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map_list *list;
+       struct drm_local_map *map;
+       int ret;
+
+       list = &obj->map_list;
+       /* NOTE(review): allocates sizeof(struct drm_map_list) for a
+          struct drm_local_map — over-allocation copied from other
+          drivers; sizeof(*map) looks like what was meant — confirm */
+       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+       if (list->map == NULL)
+               return -ENOMEM;
+       map = list->map;
+       map->type = _DRM_GEM;
+       map->size = obj->size;
+       map->handle = obj;
+
+       /* Find free space in the fake-offset manager, then claim it */
+       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+                                       obj->size / PAGE_SIZE, 0, 0);
+       if (!list->file_offset_node) {
+               dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+                                                               obj->name);
+               ret = -ENOSPC;
+               goto free_it;
+       }
+       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+                                       obj->size / PAGE_SIZE, 0);
+       if (!list->file_offset_node) {
+               ret = -ENOMEM;
+               goto free_it;
+       }
+       /* Publish the offset so the mmap fake-offset lookup can find it */
+       list->hash.key = list->file_offset_node->start;
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
+               dev_err(dev->dev, "failed to add to map hash\n");
+               goto free_mm;
+       }
+       return 0;
+
+free_mm:
+       drm_mm_put_block(list->file_offset_node);
+free_it:
+       kfree(list->map);
+       list->map = NULL;
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644 (file)
index 0000000..ce5ce30
--- /dev/null
@@ -0,0 +1,2 @@
+/* GEM glue helpers shared by the driver (implemented in gem_glue.c) */
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644 (file)
index 0000000..e770bd1
--- /dev/null
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ *         Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+
+/*
+ *     GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ *     psb_gtt_mask_pte        -       generate GTT pte entry
+ *     @pfn: page number to encode
+ *     @type: type of memory in the GTT (PSB_MMU_* flags)
+ *
+ *     Set the GTT entry for the appropriate memory type.
+ *
+ *     NOTE(review): the PTE is 32-bit (pfn << PAGE_SHIFT), which limits
+ *     backing pages to the low 4GB — presumably a hardware constraint,
+ *     confirm against the chipset documentation.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+/**
+ *     psb_gtt_entry           -       find the GTT entries for a gtt_range
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Given a gtt_range object return the GTT offset of the page table
+ *     entries for this gtt_range
+ */
+u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long offset;
+
+       /* Byte offset of the range within the GTT aperture, then scaled
+          to a slot index in the mapped page table */
+       offset = r->resource.start - dev_priv->gtt_mem->start;
+
+       return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ *     psb_gtt_insert  -       put an object into the GTT
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Take our preallocated GTT range and insert the GEM object into
+ *     the GTT. This is protected via the gtt mutex which the caller
+ *     must hold.
+ *
+ *     Returns 0, or -EINVAL if the range has no backing pages.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+{
+       u32 *gtt_slot, pte;
+       struct page **pages;
+       int i;
+
+       if (r->pages == NULL) {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       WARN_ON(r->stolen);     /* refcount these maybe ? */
+
+       gtt_slot = psb_gtt_entry(dev, r);
+       pages = r->pages;
+
+       /* Make sure changes are visible to the GPU */
+       set_pages_array_uc(pages, r->npage);
+
+       /* Write our page entries into the GTT itself, rotated by the
+          current roll offset (see psb_gtt_roll) */
+       for (i = r->roll; i < r->npage; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       for (i = 0; i < r->roll; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       /* Make sure all the entries are set before we return */
+       ioread32(gtt_slot - 1);
+
+       return 0;
+}
+
+/**
+ *     psb_gtt_remove  -       remove an object from the GTT
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Remove a preallocated GTT range from the GTT. Overwrite all the
+ *     page table entries with the dummy page. This is protected via the gtt
+ *     mutex which the caller must hold.
+ */
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 *gtt_slot, pte;
+       int i;
+
+       WARN_ON(r->stolen);
+
+       gtt_slot = psb_gtt_entry(dev, r);
+       /* Point every slot at the scratch page */
+       pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+
+       for (i = 0; i < r->npage; i++)
+               iowrite32(pte, gtt_slot++);
+       /* Read back to post the writes, then restore CPU caching */
+       ioread32(gtt_slot - 1);
+       set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ *     psb_gtt_roll    -       set scrolling position
+ *     @dev: our DRM device
+ *     @r: the gtt mapping we are using
+ *     @roll: roll offset
+ *
+ *     Roll an existing pinned mapping by moving the pages through the GTT.
+ *     This allows us to implement hardware scrolling on the consoles without
+ *     a 2D engine
+ *
+ *     NOTE(review): unlike insert/remove this takes no gtt mutex itself;
+ *     presumably callers serialise against pin/unpin — confirm.
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+       u32 *gtt_slot, pte;
+       int i;
+
+       if (roll >= r->npage) {
+               WARN_ON(1);
+               return;
+       }
+
+       r->roll = roll;
+
+       /* Not currently in the GTT - no worry we will write the mapping at
+          the right position when it gets pinned */
+       if (!r->stolen && !r->in_gart)
+               return;
+
+       gtt_slot = psb_gtt_entry(dev, r);
+
+       /* Rewrite the whole table rotated by the new roll offset */
+       for (i = r->roll; i < r->npage; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       for (i = 0; i < r->roll; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       ioread32(gtt_slot - 1);
+}
+
+/**
+ *     psb_gtt_attach_pages    -       attach and pin GEM pages
+ *     @gt: the gtt range
+ *
+ *     Pin and build an in kernel list of the pages that back our GEM object.
+ *     While we hold this the pages cannot be swapped out. This is protected
+ *     via the gtt mutex which the caller must hold.
+ *
+ *     Returns 0, -ENOMEM, or the page-cache error on failure; on error
+ *     any pages already gathered are released again.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+       struct inode *inode;
+       struct address_space *mapping;
+       int i;
+       struct page *p;
+       int pages = gt->gem.size / PAGE_SIZE;
+
+       WARN_ON(gt->pages);
+
+       /* This is the shared memory object that backs the GEM resource */
+       inode = gt->gem.filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+
+       gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+       if (gt->pages == NULL)
+               return -ENOMEM;
+       gt->npage = pages;
+
+       for (i = 0; i < pages; i++) {
+               /* FIXME: needs updating as per mail from Hugh Dickins */
+               p = read_cache_page_gfp(mapping, i,
+                                       __GFP_COLD | GFP_KERNEL);
+               if (IS_ERR(p))
+                       goto err;
+               gt->pages[i] = p;
+       }
+       return 0;
+
+err:
+       /* Drop the references taken so far and report the lookup error */
+       while (i--)
+               page_cache_release(gt->pages[i]);
+       kfree(gt->pages);
+       gt->pages = NULL;
+       return PTR_ERR(p);
+}
+
+/**
+ *     psb_gtt_detach_pages    -       unpin and release GEM pages
+ *     @gt: the gtt range
+ *
+ *     Undo the effect of psb_gtt_attach_pages. At this point the pages
+ *     must have been removed from the GTT as they could now be paged out
+ *     and move bus address. This is protected via the gtt mutex which the
+ *     caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+       int i;
+       for (i = 0; i < gt->npage; i++) {
+               /* FIXME: do we need to force dirty */
+               set_page_dirty(gt->pages[i]);
+               page_cache_release(gt->pages[i]);
+       }
+       kfree(gt->pages);
+       gt->pages = NULL;
+}
+
+/**
+ *     psb_gtt_pin             -       pin pages into the GTT
+ *     @gt: range to pin
+ *
+ *     Pin a set of pages into the GTT. The pins are refcounted so that
+ *     multiple pins need multiple unpins to undo.
+ *
+ *     Non GEM backed objects treat this as a no-op as they are always GTT
+ *     backed objects.
+ *
+ *     Returns 0 on success or the attach/insert error; takes gtt_mutex.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+       int ret = 0;
+       struct drm_device *dev = gt->gem.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->gtt_mutex);
+
+       /* First pin of a non-stolen object: attach the backing pages and
+          write them into the GTT */
+       if (gt->in_gart == 0 && gt->stolen == 0) {
+               ret = psb_gtt_attach_pages(gt);
+               if (ret < 0)
+                       goto out;
+               ret = psb_gtt_insert(dev, gt);
+               if (ret < 0) {
+                       psb_gtt_detach_pages(gt);
+                       goto out;
+               }
+       }
+       gt->in_gart++;
+out:
+       mutex_unlock(&dev_priv->gtt_mutex);
+       return ret;
+}
+
+/**
+ *     psb_gtt_unpin           -       Drop a GTT pin requirement
+ *     @gt: range to pin
+ *
+ *     Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ *     will be removed from the GTT which will also drop the page references
+ *     and allow the VM to clean up or page stuff.
+ *
+ *     Non GEM backed objects treat this as a no-op as they are always GTT
+ *     backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+       struct drm_device *dev = gt->gem.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->gtt_mutex);
+
+       WARN_ON(!gt->in_gart);
+
+       gt->in_gart--;
+       /* Last unpin of a non-stolen object: clear the PTEs and release
+          the backing pages */
+       if (gt->in_gart == 0 && gt->stolen == 0) {
+               psb_gtt_remove(dev, gt);
+               psb_gtt_detach_pages(gt);
+       }
+       mutex_unlock(&dev_priv->gtt_mutex);
+}
+
+/*
+ *     GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ *     psb_gtt_alloc_range     -       allocate GTT address space
+ *     @dev: Our DRM device
+ *     @len: length (bytes) of address space required
+ *     @name: resource name
+ *     @backed: resource should be backed by stolen pages
+ *
+ *     Ask the kernel core to find us a suitable range of addresses
+ *     to use for a GTT mapping.
+ *
+ *     Returns a gtt_range structure describing the object, or NULL on
+ *     error. On successful return the resource is both allocated and marked
+ *     as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+                                               const char *name, int backed)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct gtt_range *gt;
+       struct resource *r = dev_priv->gtt_mem;
+       int ret;
+       unsigned long start, end;
+
+       /* Stolen-backed ranges come from the front of the aperture,
+          GEM-backed ranges from everything after the stolen area */
+       if (backed) {
+               /* The start of the GTT is the stolen pages */
+               start = r->start;
+               end = r->start + dev_priv->gtt.stolen_size - 1;
+       } else {
+               /* The rest we will use for GEM backed objects */
+               start = r->start + dev_priv->gtt.stolen_size;
+               end = r->end;
+       }
+
+       gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+       if (gt == NULL)
+               return NULL;
+       gt->resource.name = name;
+       gt->stolen = backed;
+       gt->in_gart = backed;
+       gt->roll = 0;
+       /* Ensure this is set for non GEM objects */
+       gt->gem.dev = dev;
+       ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+                               len, start, end, PAGE_SIZE, NULL, NULL);
+       if (ret == 0) {
+               gt->offset = gt->resource.start - r->start;
+               return gt;
+       }
+       kfree(gt);
+       return NULL;
+}
+
+/**
+ *     psb_gtt_free_range      -       release GTT address space
+ *     @dev: our DRM device
+ *     @gt: a mapping created with psb_gtt_alloc_range
+ *
+ *     Release a resource that was allocated with psb_gtt_alloc_range. If the
+ *     object has been pinned by mmap users we clean this up here currently.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+       /* Undo the mmap pin if we are destroying the object */
+       if (gt->mmapping) {
+               psb_gtt_unpin(gt);
+               gt->mmapping = 0;
+       }
+       WARN_ON(gt->in_gart && !gt->stolen);
+       release_resource(&gt->resource);
+       kfree(gt);
+}
+
+/* One-time init of the GTT bookkeeping semaphore */
+void psb_gtt_alloc(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       init_rwsem(&dev_priv->gtt.sem);
+}
+
+/**
+ *     psb_gtt_takedown        -       unload the GTT
+ *     @dev: our DRM device
+ *
+ *     Unmap the GTT page table and the stolen-memory (vram) mapping and
+ *     restore the GMCH control and page table registers to the values
+ *     saved by psb_gtt_init().
+ */
+void psb_gtt_takedown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->gtt_map) {
+               iounmap(dev_priv->gtt_map);
+               dev_priv->gtt_map = NULL;
+       }
+       if (dev_priv->gtt_initialized) {
+               pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+                                     dev_priv->gmch_ctrl);
+               PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+               (void) PSB_RVDC32(PSB_PGETBL_CTL);
+       }
+       if (dev_priv->vram_addr) {
+               /* Bug fix: this previously called iounmap(dev_priv->gtt_map)
+                  (already unmapped and NULLed above), leaking the
+                  ioremap_wc() mapping of the stolen area */
+               iounmap(dev_priv->vram_addr);
+               dev_priv->vram_addr = NULL;
+       }
+}
+
+/**
+ *     psb_gtt_init    -       enable and map the GTT
+ *     @dev: our DRM device
+ *     @resume: non-zero when re-initialising after a resume
+ *
+ *     Enable the GMCH/GTT hardware, discover the stolen memory layout,
+ *     map the GTT table and the stolen area, then fill the GTT with the
+ *     stolen pages followed by scratch-page entries. Returns 0 or a
+ *     negative errno; on error the partial setup is torn down.
+ */
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned gtt_pages;
+       unsigned long stolen_size, vram_stolen_size;
+       unsigned i, num_pages;
+       unsigned pfn_base;
+       uint32_t vram_pages;
+       uint32_t dvmt_mode = 0;
+       struct psb_gtt *pg;
+
+       int ret = 0;
+       uint32_t pte;
+
+       mutex_init(&dev_priv->gtt_mutex);
+
+       psb_gtt_alloc(dev);
+       pg = &dev_priv->gtt;
+
+       /* Enable the GTT */
+       pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+       pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+                             dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+       dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+       PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       (void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+       /* The root resource we allocate address space from */
+       dev_priv->gtt_initialized = 1;
+
+       pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+       /*
+        *      The video mmu has a hw bug when accessing 0x0D0000000.
+        *      Make gatt start at 0x0e000,0000. This doesn't actually
+        *      matter for us but may do if the video acceleration ever
+        *      gets opened up.
+        */
+       pg->mmu_gatt_start = 0xE0000000;
+
+       pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+       gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+                                                               >> PAGE_SHIFT;
+       /* Some CDV firmware doesn't report this currently. In which case the
+          system has 64 gtt pages */
+       if (pg->gtt_start == 0 || gtt_pages == 0) {
+               dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+               gtt_pages = 64;
+               /* NOTE(review): pge_ctl still has its control bits set;
+                  '& PAGE_MASK' may have been intended here — confirm */
+               pg->gtt_start = dev_priv->pge_ctl;
+       }
+
+       pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+       pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+                                                               >> PAGE_SHIFT;
+       dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+       if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+               static struct resource fudge;   /* Preferably peppermint */
+               /* This can occur on CDV SDV systems. Fudge it in this case.
+                  We really don't care what imaginary space is being allocated
+                  at this point */
+               dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+               pg->gatt_start = 0x40000000;
+               pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+               /* This is a little confusing but in fact the GTT is providing
+                  a view from the GPU into memory and not vice versa. As such
+                  this is really allocating space that is not the same as the
+                  CPU address space on CDV */
+               fudge.start = 0x40000000;
+               fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+               fudge.name = "fudge";
+               fudge.flags = IORESOURCE_MEM;
+               dev_priv->gtt_mem = &fudge;
+       }
+
+       /* Stolen memory sits between the BSM base and the GTT table */
+       pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+       vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+                                                               - PAGE_SIZE;
+
+       stolen_size = vram_stolen_size;
+
+       printk(KERN_INFO "Stolen memory information\n");
+       printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
+       printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+               vram_stolen_size/1024);
+       dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
+       printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
+               (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+       /* NOTE(review): with '&&' a resume mismatch is only reported when
+          BOTH the page count and the stolen size changed; '||' looks like
+          the intended condition — confirm */
+       if (resume && (gtt_pages != pg->gtt_pages) &&
+           (stolen_size != pg->stolen_size)) {
+               dev_err(dev->dev, "GTT resume error.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       pg->gtt_pages = gtt_pages;
+       pg->stolen_size = stolen_size;
+       dev_priv->vram_stolen_size = vram_stolen_size;
+
+       /*
+        *      Map the GTT and the stolen memory area
+        */
+       dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+                                               gtt_pages << PAGE_SHIFT);
+       if (!dev_priv->gtt_map) {
+               dev_err(dev->dev, "Failure to map gtt.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+       if (!dev_priv->vram_addr) {
+               dev_err(dev->dev, "Failure to map stolen base.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /*
+        * Insert vram stolen pages into the GTT
+        */
+
+       pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+       /* NOTE(review): vram_pages is never read after this assignment */
+       vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+       printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+               num_pages, pfn_base << PAGE_SHIFT, 0);
+       for (i = 0; i < num_pages; ++i) {
+               pte = psb_gtt_mask_pte(pfn_base + i, 0);
+               iowrite32(pte, dev_priv->gtt_map + i);
+       }
+
+       /*
+        * Init rest of GTT to the scratch page to avoid accidents or scribbles
+        */
+
+       pfn_base = page_to_pfn(dev_priv->scratch_page);
+       pte = psb_gtt_mask_pte(pfn_base, 0);
+       for (; i < gtt_pages; ++i)
+               iowrite32(pte, dev_priv->gtt_map + i);
+
+       /* Post the writes before returning */
+       (void) ioread32(dev_priv->gtt_map + i - 1);
+       return 0;
+
+out_err:
+       psb_gtt_takedown(dev);
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644 (file)
index 0000000..aa17423
--- /dev/null
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+struct psb_gtt {
+       uint32_t gatt_start;            /* GATT aperture base (bus address) */
+       uint32_t mmu_gatt_start;        /* GATT base as seen by the GPU MMU */
+       uint32_t gtt_start;             /* GTT aperture base (bus address) */
+       uint32_t gtt_phys_start;        /* physical address of the GTT table */
+       unsigned gtt_pages;             /* number of GTT entries/pages */
+       unsigned gatt_pages;            /* number of GATT pages */
+       unsigned long stolen_size;      /* total stolen memory, in bytes */
+       unsigned long vram_stolen_size; /* stolen memory usable as VRAM */
+       struct rw_semaphore sem;        /* protects GTT mappings/allocations */
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+       struct resource resource;       /* Resource for our allocation */
+       u32 offset;                     /* GTT offset of our object */
+       struct drm_gem_object gem;      /* GEM high level stuff */
+       int in_gart;                    /* Currently in the GART (ref ct) */
+       bool stolen;                    /* Backed from stolen RAM */
+       bool mmapping;                  /* Is mmappable */
+       struct page **pages;            /* Backing pages if present */
+       int npage;                      /* Number of backing pages */
+       int roll;                       /* Roll applied to the GTT entries */
+};
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+                                               const char *name, int backed);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+                                       struct gtt_range *gt, int roll);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644 (file)
index 0000000..d4d0c5b
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+/*
+ * find_section - locate a BIOS data block (BDB) section by id
+ * @bdb: pointer to the mapped BDB header
+ * @section_id: one of the BDB_* section ids
+ *
+ * Sections follow the BDB header back to back, each laid out as a one
+ * byte id, a little-endian u16 payload size, then the payload itself.
+ * Returns a pointer to the payload of the first section whose id
+ * matches, or NULL if the id is not present within bdb->bdb_size.
+ *
+ * NOTE(review): the u16 size is read through a cast that may be
+ * unaligned — fine on x86, but would need get_unaligned() on
+ * strict-alignment architectures; also trusts bdb->bdb_size not to
+ * exceed the ROM mapping. TODO confirm both are acceptable here.
+ */
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+       u8 *base = (u8 *)bdb;
+       int index = 0;
+       u16 total, current_size;
+       u8 current_id;
+
+       /* skip to first section */
+       index += bdb->header_size;
+       total = bdb->bdb_size;
+
+       /* walk the sections looking for section_id */
+       while (index < total) {
+               current_id = *(base + index);
+               index++;
+               current_size = *((u16 *)(base + index));
+               index += 2;
+               if (current_id == section_id)
+                       return base + index;
+               index += current_size;
+       }
+
+       return NULL;
+}
+
+/*
+ * fill_detail_timing_data - convert a VBT DVO timing into a DRM mode
+ * @panel_fixed_mode: mode structure to fill in
+ * @dvo_timing: packed EDID-style detailed timing from the VBT
+ *
+ * The VBT stores active/blank/sync values split across low bytes and
+ * high nibbles (see struct lvds_dvo_timing); splice them back together
+ * and derive the hsync/vsync start/end and totals the DRM core expects.
+ * Marks the result as the preferred mode and names it.
+ */
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+                       struct lvds_dvo_timing *dvo_timing)
+{
+       panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+               dvo_timing->hactive_lo;
+       panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+       panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+               dvo_timing->hsync_pulse_width;
+       panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+       panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+               dvo_timing->vactive_lo;
+       panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+               dvo_timing->vsync_off;
+       panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+               dvo_timing->vsync_pulse_width;
+       panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+               ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+       /* VBT clock is in 10 kHz units; DRM modes use kHz */
+       panel_fixed_mode->clock = dvo_timing->clock * 10;
+       panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+       /* Some VBTs have bogus h/vtotal values */
+       if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+               panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+       if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+               panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+       drm_mode_set_name(panel_fixed_mode);
+}
+
+/*
+ * parse_backlight_data - cache the LVDS backlight block for our panel
+ * @dev_priv: driver private data
+ * @bdb: mapped BIOS data block
+ *
+ * Looks up the BDB_LVDS_BACKLIGHT section and copies the entry for the
+ * active panel type into dev_priv->lvds_bl. On any failure (missing
+ * options block, missing backlight block, allocation failure) the
+ * cached pointer is left NULL and the driver falls back to defaults.
+ */
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+                               struct bdb_header *bdb)
+{
+       struct bdb_lvds_backlight *vbt_lvds_bl;
+       struct bdb_lvds_backlight *lvds_bl;
+       u8 p_type;
+       u8 *bl_start;
+       struct bdb_lvds_options *lvds_opts
+                               = find_section(bdb, BDB_LVDS_OPTIONS);
+
+       dev_priv->lvds_bl = NULL;
+
+       if (!lvds_opts)
+               return;
+
+       p_type = lvds_opts->panel_type;
+
+       bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+       if (!bl_start)
+               /* Backlight block absent from this VBT; nothing to cache.
+                * (Without this check bl_start + 1 would be dereferenced
+                * below via memcpy — NULL pointer crash.) */
+               return;
+
+       /* First byte is the entry count; entries follow, one per panel */
+       vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+       lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+       if (!lvds_bl) {
+               dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+               return;
+       }
+       memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+       dev_priv->lvds_bl = lvds_bl;
+}
+
+/*
+ * parse_lfp_panel_data - extract the integrated (LFP) panel mode
+ * @dev_priv: driver private data
+ * @bdb: mapped BIOS data block
+ *
+ * Reads BDB_LVDS_OPTIONS and BDB_LVDS_LFP_DATA to build the fixed mode
+ * for the built-in panel. Sets dev_priv->lvds_vbt = 1 and stores the
+ * mode in dev_priv->lfp_lvds_vbt_mode on success; leaves the defaults
+ * (lvds_vbt = 0, lvds_dither = 0) on any failure or bogus timings.
+ *
+ * NOTE(review): panel_type indexes data[16] but only the 0xff sentinel
+ * is rejected — a VBT reporting panel_type 16..0xfe would read out of
+ * bounds. TODO confirm whether real VBTs can carry such values.
+ */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+                           struct bdb_header *bdb)
+{
+       struct bdb_lvds_options *lvds_options;
+       struct bdb_lvds_lfp_data *lvds_lfp_data;
+       struct bdb_lvds_lfp_data_entry *entry;
+       struct lvds_dvo_timing *dvo_timing;
+       struct drm_display_mode *panel_fixed_mode;
+
+       /* Defaults if we can't find VBT info */
+       dev_priv->lvds_dither = 0;
+       dev_priv->lvds_vbt = 0;
+
+       lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+       if (!lvds_options)
+               return;
+
+       dev_priv->lvds_dither = lvds_options->pixel_dither;
+       /* 0xff means "no panel" */
+       if (lvds_options->panel_type == 0xff)
+               return;
+
+       lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+       if (!lvds_lfp_data)
+               return;
+
+
+       entry = &lvds_lfp_data->data[lvds_options->panel_type];
+       dvo_timing = &entry->dvo_timing;
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+                                     GFP_KERNEL);
+       if (panel_fixed_mode == NULL) {
+               dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+               return;
+       }
+
+       dev_priv->lvds_vbt = 1;
+       fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+       /* Only accept the mode if it produced sane totals */
+       if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+               dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+               drm_mode_debug_printmodeline(panel_fixed_mode);
+       } else {
+               dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+               dev_priv->lvds_vbt = 0;
+               kfree(panel_fixed_mode);
+       }
+       return;
+}
+
+/*
+ * parse_sdvo_panel_data - extract the SDVO LVDS panel mode
+ * @dev_priv: driver private data
+ * @bdb: mapped BIOS data block
+ *
+ * Builds dev_priv->sdvo_lvds_vbt_mode from the BDB_SDVO_PANEL_DTDS
+ * timing selected by the SDVO LVDS options' panel_type. Leaves the
+ * pointer NULL when either section is missing or allocation fails.
+ *
+ * NOTE(review): dvo_timing + panel_type indexes the DTD array with no
+ * visible upper bound on panel_type — TODO confirm VBT guarantees.
+ */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+                     struct bdb_header *bdb)
+{
+       struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+       struct lvds_dvo_timing *dvo_timing;
+       struct drm_display_mode *panel_fixed_mode;
+
+       dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+       sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+       if (!sdvo_lvds_options)
+               return;
+
+       dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+       if (!dvo_timing)
+               return;
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+       if (!panel_fixed_mode)
+               return;
+
+       /* The DTDS section is an array; pick the entry for our panel */
+       fill_detail_timing_data(panel_fixed_mode,
+                       dvo_timing + sdvo_lvds_options->panel_type);
+
+       dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+       return;
+}
+
+/*
+ * parse_general_features - record TV/CRT/SSC capabilities from the VBT
+ * @dev_priv: driver private data
+ * @bdb: mapped BIOS data block
+ *
+ * Defaults are installed first so that a VBT lacking the general
+ * features block still leaves the driver in a usable state.
+ */
+static void parse_general_features(struct drm_psb_private *dev_priv,
+                      struct bdb_header *bdb)
+{
+       struct bdb_general_features *features;
+
+       /* Sensible defaults in case the block is absent */
+       dev_priv->int_tv_support = 1;
+       dev_priv->int_crt_support = 1;
+
+       features = find_section(bdb, BDB_GENERAL_FEATURES);
+       if (!features)
+               return;
+
+       dev_priv->int_tv_support = features->int_tv_support;
+       dev_priv->int_crt_support = features->int_crt_support;
+       dev_priv->lvds_use_ssc = features->enable_ssc;
+       if (dev_priv->lvds_use_ssc)
+               dev_priv->lvds_ssc_freq = features->ssc_freq ? 100 : 96;
+}
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Maps the PCI option ROM, scans it for the "$VBT" signature and, when
+ * found, parses the BDB sections we care about (general features, LFP
+ * and SDVO panel data, backlight) into the driver private data.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c
+ * code. Note that it would be better to use a BIOS call to get the VBT,
+ * as BIOSes may feed an updated VBT back through that, compared to what
+ * we'll fetch using this method of groping around in the BIOS data.
+ *
+ * Returns false on success, true on failure (ROM unmappable or no VBT
+ * signature). The return type is bool to match the prototype in
+ * intel_bios.h; the previous -1/0 returns converted to exactly these
+ * truth values, so caller-visible behavior is unchanged.
+ */
+bool psb_intel_init_bios(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
+       struct vbt_header *vbt = NULL;
+       struct bdb_header *bdb;
+       u8 __iomem *bios;
+       size_t size;
+       size_t i;       /* size_t: avoids signed/unsigned compare with size */
+
+       bios = pci_map_rom(pdev, &size);
+       if (!bios)
+               return true;
+
+       /* Scour memory looking for the VBT signature */
+       for (i = 0; i + 4 < size; i++) {
+               if (!memcmp(bios + i, "$VBT", 4)) {
+                       vbt = (struct vbt_header *)(bios + i);
+                       break;
+               }
+       }
+
+       if (!vbt) {
+               dev_err(dev->dev, "VBT signature missing\n");
+               pci_unmap_rom(pdev, bios);
+               return true;
+       }
+
+       bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+       /* Grab useful general definitions */
+       parse_general_features(dev_priv, bdb);
+       parse_lfp_panel_data(dev_priv, bdb);
+       parse_sdvo_panel_data(dev_priv, bdb);
+       parse_backlight_data(dev_priv, bdb);
+
+       pci_unmap_rom(pdev, bios);
+
+       return false;
+}
+
+/**
+ * psb_intel_destroy_bios - free cached VBT data
+ * @dev: DRM device
+ *
+ * Releases the panel modes and backlight data allocated by
+ * psb_intel_init_bios() and clears the cached pointers so a later
+ * teardown cannot double-free them.
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /* kfree(NULL) is a no-op, so the NULL guards were redundant */
+       kfree(dev_priv->sdvo_lvds_vbt_mode);
+       dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+       kfree(dev_priv->lfp_lvds_vbt_mode);
+       dev_priv->lfp_lvds_vbt_mode = NULL;
+
+       kfree(dev_priv->lvds_bl);
+       dev_priv->lvds_bl = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644 (file)
index 0000000..70f1bf0
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+       u8 signature[20];               /**< Always starts with "$VBT" (see the memcmp in psb_intel_init_bios) */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 vbt_size;                   /**< in bytes */
+       u8 vbt_checksum;
+       u8 reserved0;
+       u32 bdb_offset;                 /**< from beginning of VBT */
+       u32 aim_offset[4];              /**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+       u8 signature[16];               /**< Always 'BIOS_DATA_BLOCK' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 bdb_size;                   /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+       u8 type; /* 0 == desktop, 1 == mobile */
+       u8 relstage;
+       u8 chipset;
+       u8 lvds_present:1;
+       u8 tv_present:1;
+       u8 rsvd2:6; /* finish byte */
+       u8 rsvd3[4];
+       u8 signon[155];
+       u8 copyright[61];
+       u16 code_segment;
+       u8 dos_boot_mode;
+       u8 bandwidth_percent;
+       u8 rsvd4; /* popup memory size */
+       u8 resize_pci_bios;
+       u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES     1
+#define BDB_GENERAL_DEFINITIONS          2
+#define BDB_OLD_TOGGLE_LIST      3
+#define BDB_MODE_SUPPORT_LIST    4
+#define BDB_GENERIC_MODE_TABLE   5
+#define BDB_EXT_MMIO_REGS        6
+#define BDB_SWF_IO               7
+#define BDB_SWF_MMIO             8
+#define BDB_DOT_CLOCK_TABLE      9
+#define BDB_MODE_REMOVAL_TABLE  10
+#define BDB_CHILD_DEVICE_TABLE  11
+#define BDB_DRIVER_FEATURES     12
+#define BDB_DRIVER_PERSISTENCE  13
+#define BDB_EXT_TABLE_PTRS      14
+#define BDB_DOT_CLOCK_OVERRIDE  15
+#define BDB_DISPLAY_SELECT      16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION     18
+#define BDB_DISPLAY_REMOVE      19
+#define BDB_OEM_CUSTOM          20
+#define BDB_EFP_LIST            21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS   22
+#define BDB_SDVO_PANEL_DTDS     23
+#define BDB_SDVO_LVDS_PNP_IDS   24
+#define BDB_SDVO_LVDS_POWER_SEQ         25
+#define BDB_TV_OPTIONS          26
+#define BDB_LVDS_OPTIONS        40
+#define BDB_LVDS_LFP_DATA_PTRS  41
+#define BDB_LVDS_LFP_DATA       42
+#define BDB_LVDS_BACKLIGHT      43
+#define BDB_LVDS_POWER          44
+#define BDB_SKIP               254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+       /* bits 1 */
+       u8 panel_fitting:2;
+       u8 flexaim:1;
+       u8 msg_enable:1;
+       u8 clear_screen:3;
+       u8 color_flip:1;
+
+       /* bits 2 */
+       u8 download_ext_vbt:1;
+       u8 enable_ssc:1;
+       u8 ssc_freq:1;
+       u8 enable_lfp_on_override:1;
+       u8 disable_ssc_ddt:1;
+       u8 rsvd8:3; /* finish byte */
+
+       /* bits 3 */
+       u8 disable_smooth_vision:1;
+       u8 single_dvi:1;
+       u8 rsvd9:6; /* finish byte */
+
+       /* bits 4 */
+       u8 legacy_monitor_detect;
+
+       /* bits 5 */
+       u8 int_crt_support:1;
+       u8 int_tv_support:1;
+       u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+       /* DDC GPIO */
+       u8 crt_ddc_gmbus_pin;
+
+       /* DPMS bits */
+       u8 dpms_acpi:1;
+       u8 skip_boot_crt_detect:1;
+       u8 dpms_aim:1;
+       u8 rsvd1:5; /* finish byte */
+
+       /* boot device bits */
+       u8 boot_display[2];
+       u8 child_dev_size;
+
+       /* device info */
+       u8 tv_or_lvds_info[33];
+       u8 dev1[33];
+       u8 dev2[33];
+       u8 dev3[33];
+       u8 dev4[33];
+       /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+       u8 panel_type;
+       u8 rsvd1;
+       /* LVDS capabilities, stored in a dword */
+       u8 pfit_mode:2;
+       u8 pfit_text_mode_enhanced:1;
+       u8 pfit_gfx_mode_enhanced:1;
+       u8 pfit_ratio_auto:1;
+       u8 pixel_dither:1;
+       u8 lvds_edid:1;
+       u8 rsvd2:1;
+       u8 rsvd4;
+} __attribute__((packed));
+
+/* One BDB_LVDS_BACKLIGHT entry; the block holds one entry per panel type */
+struct bdb_lvds_backlight {
+       u8 type:2;              /* backlight control type */
+       u8 pol:1;               /* PWM polarity */
+       u8 gpio:3;
+       u8 gmbus:2;
+       u16 freq;               /* PWM frequency */
+       u8 minbrightness;
+       u8 i2caddr;
+       u8 brightnesscmd;
+       /*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+       u16 fp_timing_offset; /* offsets are from start of bdb */
+       u8 fp_table_size;
+       u16 dvo_timing_offset;
+       u8 dvo_table_size;
+       u16 panel_pnp_id_offset;
+       u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+       u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+       struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+       u16 x_res;
+       u16 y_res;
+       u32 lvds_reg;
+       u32 lvds_reg_val;
+       u32 pp_on_reg;
+       u32 pp_on_reg_val;
+       u32 pp_off_reg;
+       u32 pp_off_reg_val;
+       u32 pp_cycle_reg;
+       u32 pp_cycle_reg_val;
+       u32 pfit_reg;
+       u32 pfit_reg_val;
+       u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+       u16 clock;              /**< In 10khz */
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_off_lo;
+       u8 hsync_pulse_width;
+       u8 vsync_pulse_width:4;
+       u8 vsync_off:4;
+       u8 rsvd0:6;
+       u8 hsync_off_hi:2;
+       u8 h_image;
+       u8 v_image;
+       u8 max_hv;
+       u8 h_border;
+       u8 v_border;
+       u8 rsvd1:3;
+       u8 digital:2;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+       u16 mfg_name;
+       u16 product_code;
+       u32 serial;
+       u8 mfg_week;
+       u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+       struct lvds_fp_timing fp_timing;
+       struct lvds_dvo_timing dvo_timing;
+       struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+       struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+       char signature[16];
+       char oem_device[20];
+       u16 aimdb_version;
+       u16 aimdb_header_size;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+       u8 aimdb_id;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+       u16 fp_timing_offset;
+       u8 fp_timing_size;
+       u16 dvo_timing_offset;
+       u8 dvo_timing_size;
+       u16 text_fitting_offset;
+       u8 text_fitting_size;
+       u16 graphics_fitting_offset;
+       u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+       struct aimdb_block aimdb_block;
+       struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+       u8 panel_backlight;
+       u8 h40_set_panel_type;
+       u8 panel_type;
+       u8 ssc_clk_freq;
+       u16 als_low_trip;
+       u16 als_high_trip;
+       u8 sclalarcoeff_tab_row_num;
+       u8 sclalarcoeff_tab_row_size;
+       u8 coefficient[8];
+       u8 panel_misc_bits_1;
+       u8 panel_misc_bits_2;
+       u8 panel_misc_bits_3;
+       u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+extern bool psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN  (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK       0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE         (0x0<<3)
+#define   GR18_HK_LFP_STRETCH  (0x1<<3)
+#define   GR18_HK_TOGGLE_DISP  (0x2<<3)
+#define   GR18_HK_DISP_SWITCH  (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED        (0x7<<3)
+#define   GR18_HK_PFIT         (0x8<<3)
+#define   GR18_HK_APM_CHANGE   (0xa<<3)
+#define   GR18_HK_MULTIPLE     (0xc<<3)
+#define GR18_USER_INT_EN       (1<<2)
+#define GR18_A0000_FLUSH_EN    (1<<1)
+#define GR18_SMM_EN            (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT       16
+#define SWF00_XRES_SHIFT       0
+#define SWF00_RES_MASK         0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK   0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN  (1<<28)
+#define SWF10_LFP_DPMS_OVR     (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE     0x0
+#define   SWF10_TOGGLE_LIST_1  0x1
+#define   SWF10_TOGGLE_LIST_2  0x2
+#define   SWF10_TOGGLE_LIST_3  0x3
+#define   SWF10_TOGGLE_LIST_4  0x4
+#define SWF10_PANNING_EN       (1<<23)
+#define SWF10_DRIVER_LOADED    (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE   (1<<20)
+#define SWF10_OVERLAY_EN       (1<<19)
+#define SWF10_PLANEB_HOLDOFF   (1<<18)
+#define SWF10_PLANEA_HOLDOFF   (1<<17)
+#define SWF10_VGA_HOLDOFF      (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define   SWF10_PIPEB_LFP2     (1<<15)
+#define   SWF10_PIPEB_EFP2     (1<<14)
+#define   SWF10_PIPEB_TV2      (1<<13)
+#define   SWF10_PIPEB_CRT2     (1<<12)
+#define   SWF10_PIPEB_LFP      (1<<11)
+#define   SWF10_PIPEB_EFP      (1<<10)
+#define   SWF10_PIPEB_TV       (1<<9)
+#define   SWF10_PIPEB_CRT      (1<<8)
+#define   SWF10_PIPEA_LFP2     (1<<7)
+#define   SWF10_PIPEA_EFP2     (1<<6)
+#define   SWF10_PIPEA_TV2      (1<<5)
+#define   SWF10_PIPEA_CRT2     (1<<4)
+#define   SWF10_PIPEA_LFP      (1<<3)
+#define   SWF10_PIPEA_EFP      (1<<2)
+#define   SWF10_PIPEA_TV       (1<<1)
+#define   SWF10_PIPEA_CRT      (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT        16
+#define SWF11_SV_TEST_EN       (1<<15)
+#define SWF11_IS_AGP           (1<<14)
+#define SWF11_DISPLAY_HOLDOFF  (1<<13)
+#define SWF11_DPMS_REDUCED     (1<<12)
+#define SWF11_IS_VBE_MODE      (1<<11)
+#define SWF11_PIPEB_ACCESS     (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK                0x07
+#define   SWF11_DPMS_OFF       (1<<2)
+#define   SWF11_DPMS_SUSPEND   (1<<1)
+#define   SWF11_DPMS_STANDBY   (1<<0)
+#define   SWF11_DPMS_ON                0
+
+#define SWF14_GFX_PFIT_EN      (1<<31)
+#define SWF14_TEXT_PFIT_EN     (1<<30)
+#define SWF14_LID_STATUS_CLOSED        (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN         (1<<28)
+#define SWF14_DISPLAY_HOLDOFF  (1<<27)
+#define SWF14_DISP_DETECT_EN   (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS    (1<<24)
+#define SWF14_OS_TYPE_WIN9X    (1<<23)
+#define SWF14_OS_TYPE_WINNT    (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK     0x00070000
+#define   SWF14_PM_ACPI_VIDEO  (0x4 << 16)
+#define   SWF14_PM_ACPI                (0x3 << 16)
+#define   SWF14_PM_APM_12      (0x2 << 16)
+#define   SWF14_PM_APM_11      (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK  0x0000ffff /* see GR18 6:3 for event type */
+         /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN  (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN  (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+         /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN                (1<<0) /* 0 means disable */
+         /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE  0x4
+#define   SWF14_APM_SUSPEND    0x3
+#define   SWF14_APM_STANDBY    0x1
+#define   SWF14_APM_RESTORE    0x0
+
+#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644 (file)
index 0000000..147584a
--- /dev/null
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+       int ret__ = 0;                                                  \
+       while (! (COND)) {                                              \
+               if (time_after(jiffies, timeout__)) {                   \
+                       ret__ = -ETIMEDOUT;                             \
+                       break;                                          \
+               }                                                       \
+               if (W && !(in_atomic() || in_dbg_master())) msleep(W);  \
+       }                                                               \
+       ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+       return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       struct drm_psb_private *dev_priv;
+       u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+       REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+       /* When using bit bashing for I2C, this bit needs to be set to 1 */
+       /* FIXME: We are never Pineview, right?
+
+       u32 val;
+
+       if (!IS_PINEVIEW(dev_priv->dev))
+               return;
+
+       val = REG_READ(DSPCLK_GATE_D);
+       if (enable)
+               val |= DPCUNIT_CLOCK_GATE_DISABLE;
+       else
+               val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+       REG_WRITE(DSPCLK_GATE_D, val);
+
+       return;
+       */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = 0;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved = REG_READ(gpio->reg) &
+                                    (GPIO_DATA_PULLUP_DISABLE |
+                                     GPIO_CLOCK_PULLUP_DISABLE);
+
+       return reserved;
+}
+
+static int get_clock(void *data)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+       REG_WRITE(gpio->reg, reserved);
+       return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+       REG_WRITE(gpio->reg, reserved);
+       return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       u32 clock_bits;
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                       GPIO_CLOCK_VAL_MASK;
+
+       REG_WRITE(gpio->reg, reserved | clock_bits);
+       REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       u32 data_bits;
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                       GPIO_DATA_VAL_MASK;
+
+       REG_WRITE(gpio->reg, reserved | data_bits);
+       REG_READ(gpio->reg);
+}
+
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+       static const int map_pin_to_reg[] = {
+               0,
+               GPIOB,
+               GPIOA,
+               GPIOC,
+               GPIOD,
+               GPIOE,
+               0,
+               GPIOF,
+       };
+       struct intel_gpio *gpio;
+
+       if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+               return NULL;
+
+       gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+       if (gpio == NULL)
+               return NULL;
+
+       gpio->reg = map_pin_to_reg[pin];
+       gpio->dev_priv = dev_priv;
+
+       snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+                "gma500 GPIO%c", "?BACDE?F"[pin]);
+       gpio->adapter.owner = THIS_MODULE;
+       gpio->adapter.algo_data = &gpio->algo;
+       gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+       gpio->algo.setsda = set_data;
+       gpio->algo.setscl = set_clock;
+       gpio->algo.getsda = get_data;
+       gpio->algo.getscl = get_clock;
+       gpio->algo.udelay = I2C_RISEFALL_TIME;
+       gpio->algo.timeout = usecs_to_jiffies(2200);
+       gpio->algo.data = gpio;
+
+       if (i2c_bit_add_bus(&gpio->adapter))
+               goto out_free;
+
+       return &gpio->adapter;
+
+out_free:
+       kfree(gpio);
+       return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+                    struct i2c_adapter *adapter,
+                    struct i2c_msg *msgs,
+                    int num)
+{
+       struct intel_gpio *gpio = container_of(adapter,
+                                              struct intel_gpio,
+                                              adapter);
+       int ret;
+
+       gma_intel_i2c_reset(dev_priv->dev);
+
+       intel_i2c_quirk_set(dev_priv, true);
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       udelay(I2C_RISEFALL_TIME);
+
+       ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       intel_i2c_quirk_set(dev_priv, false);
+
+       return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+          struct i2c_msg *msgs,
+          int num)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+       struct drm_psb_private *dev_priv = adapter->algo_data;
+       struct drm_device *dev = dev_priv->dev;
+       int i, reg_offset;
+
+       if (bus->force_bit)
+               return intel_i2c_quirk_xfer(dev_priv,
+                                           bus->force_bit, msgs, num);
+
+       reg_offset = 0;
+
+       REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+       for (i = 0; i < num; i++) {
+               u16 len = msgs[i].len;
+               u8 *buf = msgs[i].buf;
+
+               if (msgs[i].flags & I2C_M_RD) {
+                       REG_WRITE(GMBUS1 + reg_offset,
+                                  GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+                                  (len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+                       REG_READ(GMBUS2+reg_offset);
+                       do {
+                               u32 val, loop = 0;
+
+                               if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       goto clear_err;
+
+                               val = REG_READ(GMBUS3 + reg_offset);
+                               do {
+                                       *buf++ = val & 0xff;
+                                       val >>= 8;
+                               } while (--len && ++loop < 4);
+                       } while (len);
+               } else {
+                       u32 val, loop;
+
+                       val = loop = 0;
+                       do {
+                               val |= *buf++ << (8 * loop);
+                       } while (--len && ++loop < 4);
+
+                       REG_WRITE(GMBUS3 + reg_offset, val);
+                       REG_WRITE(GMBUS1 + reg_offset,
+                                  (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+                                  (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+                       REG_READ(GMBUS2+reg_offset);
+
+                       while (len) {
+                               if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       goto clear_err;
+
+                               val = loop = 0;
+                               do {
+                                       val |= *buf++ << (8 * loop);
+                               } while (--len && ++loop < 4);
+
+                               REG_WRITE(GMBUS3 + reg_offset, val);
+                               REG_READ(GMBUS2+reg_offset);
+                       }
+               }
+
+               if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+                       goto timeout;
+               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                       goto clear_err;
+       }
+
+       goto done;
+
+clear_err:
+       /* Toggle the Software Clear Interrupt bit. This has the effect
+        * of resetting the GMBUS controller and so clearing the
+        * BUS_ERROR raised by the slave's NAK.
+        */
+       REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+       REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+       /* Mark the GMBUS interface as disabled. We will re-enable it at the
+        * start of the next xfer, till then let it sleep.
+        */
+       REG_WRITE(GMBUS0 + reg_offset, 0);
+       return i;
+
+timeout:
+       DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+                bus->reg0 & 0xff, bus->adapter.name);
+       REG_WRITE(GMBUS0 + reg_offset, 0);
+
+       /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+       bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+       if (!bus->force_bit)
+               return -ENOMEM;
+
+       return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+
+       if (bus->force_bit)
+               bus->force_bit->algo->functionality(bus->force_bit);
+
+       return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+               /* I2C_FUNC_10BIT_ADDR | */
+               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+               I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+       .master_xfer    = gmbus_xfer,
+       .functionality  = gmbus_func
+};
+
+/**
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+       static const char *names[GMBUS_NUM_PORTS] = {
+               "disabled",
+               "ssc",
+               "vga",
+               "panel",
+               "dpc",
+               "dpb",
+               "reserved",
+               "dpd",
+       };
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret, i;
+
+       dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+                                 GFP_KERNEL);
+       if (dev_priv->gmbus == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+               bus->adapter.owner = THIS_MODULE;
+               bus->adapter.class = I2C_CLASS_DDC;
+               snprintf(bus->adapter.name,
+                        sizeof(bus->adapter.name),
+                        "gma500 gmbus %s",
+                        names[i]);
+
+               bus->adapter.dev.parent = &dev->pdev->dev;
+               bus->adapter.algo_data  = dev_priv;
+
+               bus->adapter.algo = &gmbus_algorithm;
+               ret = i2c_add_adapter(&bus->adapter);
+               if (ret)
+                       goto err;
+
+               /* By default use a conservative clock rate */
+               bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+               /* XXX force bit banging until GMBUS is fully debugged */
+               bus->force_bit = intel_gpio_create(dev_priv, i);
+       }
+
+       gma_intel_i2c_reset(dev_priv->dev);
+
+       return 0;
+
+err:
+       while (--i) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               i2c_del_adapter(&bus->adapter);
+       }
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
+       return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       /* speed:
+        * 0x0 = 100 KHz
+        * 0x1 = 50 KHz
+        * 0x2 = 400 KHz
+        * 0x3 = 1000 Khz
+        */
+       bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       if (force_bit) {
+               if (bus->force_bit == NULL) {
+                       struct drm_psb_private *dev_priv = adapter->algo_data;
+                       bus->force_bit = intel_gpio_create(dev_priv,
+                                                          bus->reg0 & 0xff);
+               }
+       } else {
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+                       bus->force_bit = NULL;
+               }
+       }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int i;
+
+       if (dev_priv->gmbus == NULL)
+               return;
+
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+               }
+               i2c_del_adapter(&bus->adapter);
+       }
+
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644 (file)
index 0000000..98a28c2
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 val;
+
+       val = REG_READ(chan->reg);
+       return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 val;
+
+       val = REG_READ(chan->reg);
+       return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 reserved = 0, clock_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved =
+                   REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                          GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                   GPIO_CLOCK_VAL_MASK;
+       REG_WRITE(chan->reg, reserved | clock_bits);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 reserved = 0, data_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved =
+                   REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                          GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits =
+                   GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                   GPIO_DATA_VAL_MASK;
+
+       REG_WRITE(chan->reg, reserved | data_bits);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+                                       const u32 reg, const char *name)
+{
+       struct psb_intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               goto out_free;
+
+       chan->drm_dev = dev;
+       chan->reg = reg;
+       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+       chan->adapter.owner = THIS_MODULE;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 20;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       if (i2c_bit_add_bus(&chan->adapter))
+               goto out_free;
+
+       /* JJJ:  raise SCL and SDA? */
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(20);
+
+       return chan;
+
+out_free:
+       kfree(chan);
+       return NULL;
+}
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @output: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+       if (!chan)
+               return;
+
+       i2c_del_adapter(&chan->adapter);
+       kfree(chan);
+}
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
new file mode 100644 (file)
index 0000000..d946bc1
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * FIXME: resolve with the i915 version
+ */
+
+#include "psb_drv.h"
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       u32 opregion_ver;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u8 reserved[164];
+} __packed;
+
+struct opregion_apci {
+       /*FIXME: add it later*/
+} __packed;
+
+struct opregion_swsci {
+       /*FIXME: add it later*/
+} __packed;
+
+struct opregion_acpi {
+       /*FIXME: add it later*/
+} __packed;
+
+int gma_intel_opregion_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 opregion_phy;
+       void *base;
+       u32 *lid_state;
+
+       dev_priv->lid_state = NULL;
+
+       pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
+       if (opregion_phy == 0)
+               return -ENOTSUPP;
+
+       base = ioremap(opregion_phy, 8*1024);
+       if (!base)
+               return -ENOMEM;
+
+       lid_state = base + 0x01ac;
+
+       dev_priv->lid_state = lid_state;
+       dev_priv->lid_last_state = readl(lid_state);
+       return 0;
+}
+
+int gma_intel_opregion_exit(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->lid_state)
+               iounmap(dev_priv->lid_state);
+       return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644 (file)
index 0000000..5eee9ad
--- /dev/null
@@ -0,0 +1,263 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "mid_bios.h"
+
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       uint32_t fuse_value = 0;
+       uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE  (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK  0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+       if (pci_root == NULL) {
+               WARN_ON(1);
+               return;
+       }
+
+
+       pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+       pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+       /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+       if (IS_MRST(dev))
+               dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+       DRM_INFO("internal display is %s\n",
+                dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+        /* Prevent runtime suspend at start*/
+        if (dev_priv->iLVDS_enable) {
+               dev_priv->is_lvds_on = true;
+               dev_priv->is_mipi_on = false;
+       } else {
+               dev_priv->is_mipi_on = true;
+               dev_priv->is_lvds_on = false;
+       }
+
+       dev_priv->video_device_fuse = fuse_value;
+
+       pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+       pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+       dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
+       fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+       dev_priv->fuse_reg_value = fuse_value;
+
+       switch (fuse_value_tmp) {
+       case FB_SKU_100:
+               dev_priv->core_freq = 200;
+               break;
+       case FB_SKU_100L:
+               dev_priv->core_freq = 100;
+               break;
+       case FB_SKU_83:
+               dev_priv->core_freq = 166;
+               break;
+       default:
+               dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+                                                               fuse_value_tmp);
+               dev_priv->core_freq = 0;
+       }
+       dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+       pci_dev_put(pci_root);
+}
+
+/*
+ *     Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+       uint32_t platform_rev_id = 0;
+       struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+       if (pci_gfx_root == NULL) {
+               WARN_ON(1);
+               return;
+       }
+       pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+       dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+       pci_dev_put(pci_gfx_root);
+       dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+                                       dev_priv->platform_rev_id);
+}
+
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+       u32 addr;
+       u16 new_size;
+       u8 *vbt_virtual;
+       u8 bpi;
+       u8 number_desc = 0;
+       struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+       struct gct_r10_timing_info ti;
+       void *pGCT;
+       struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+       /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+       pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+       pci_dev_put(pci_gfx_root);
+
+       dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+       /* check for platform config address == 0. */
+       /* this means fw doesn't support vbt */
+
+       if (addr == 0) {
+               vbt->size = 0;
+               return;
+       }
+
+       /* get the virtual address of the vbt */
+       vbt_virtual = ioremap(addr, sizeof(*vbt));
+       if (vbt_virtual == NULL) {
+               vbt->size = 0;
+               return;
+       }
+
+       memcpy(vbt, vbt_virtual, sizeof(*vbt));
+       iounmap(vbt_virtual); /* Free virtual address space */
+
+       /* No matching signature don't process the data */
+       if (memcmp(vbt->signature, "$GCT", 4)) {
+               vbt->size = 0;
+               return;
+       }
+
+       dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+
+       switch (vbt->revision) {
+       case 0:
+               vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+                                       vbt->size - sizeof(*vbt) + 4);
+               pGCT = vbt->oaktrail_gct;
+               bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
+               dev_priv->gct_data.bpi = bpi;
+               dev_priv->gct_data.pt =
+                       ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
+               memcpy(&dev_priv->gct_data.DTD,
+                       &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
+                               sizeof(struct oaktrail_timing_info));
+               dev_priv->gct_data.Panel_Port_Control =
+                 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                       ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+               break;
+       case 1:
+               vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+                                       vbt->size - sizeof(*vbt) + 4);
+               pGCT = vbt->oaktrail_gct;
+               bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
+               dev_priv->gct_data.bpi = bpi;
+               dev_priv->gct_data.pt =
+                       ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
+               memcpy(&dev_priv->gct_data.DTD,
+                       &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
+                               sizeof(struct oaktrail_timing_info));
+               dev_priv->gct_data.Panel_Port_Control =
+                 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                       ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+               break;
+       case 0x10:
+               /*header definition changed from rev 01 (v2) to rev 10h. */
+               /*so, some values have changed location*/
+               new_size = vbt->checksum; /*checksum contains lo size byte*/
+               /*LSB of oaktrail_gct contains hi size byte*/
+               new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
+
+               vbt->checksum = vbt->size; /*size contains the checksum*/
+               if (new_size > 0xff)
+                       vbt->size = 0xff; /*restrict size to 255*/
+               else
+                       vbt->size = new_size;
+
+               /* number of descriptors defined in the GCT */
+               number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
+               bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
+               vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+                               GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+               pGCT = vbt->oaktrail_gct;
+               pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+               dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+
+               /*copy the GCT display timings into a temp structure*/
+               memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+
+               /*now copy the temp struct into the dev_priv->gct_data*/
+               dp_ti->pixel_clock = ti.pixel_clock;
+               dp_ti->hactive_hi = ti.hactive_hi;
+               dp_ti->hactive_lo = ti.hactive_lo;
+               dp_ti->hblank_hi = ti.hblank_hi;
+               dp_ti->hblank_lo = ti.hblank_lo;
+               dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+               dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+               dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+               dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+               dp_ti->vactive_hi = ti.vactive_hi;
+               dp_ti->vactive_lo = ti.vactive_lo;
+               dp_ti->vblank_hi = ti.vblank_hi;
+               dp_ti->vblank_lo = ti.vblank_lo;
+               dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+               dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+               dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+               dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+
+               /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                                                       *((u8 *)pGCT + 0x0d);
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+                                               (*((u8 *)pGCT + 0x0e)) << 8;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown revision of GCT!\n");
+               vbt->size = 0;
+       }
+}
+
+int mid_chip_setup(struct drm_device *dev) /* MID probe: fuses, VBT, PCI rev; always 0 */
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       mid_get_fuse_settings(dev);
+       mid_get_vbt_data(dev_priv); /* parses $GCT table; sets vbt->size = 0 on failure */
+       mid_get_pci_revID(dev_priv);
+       return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644 (file)
index 0000000..00e7d56
--- /dev/null
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644 (file)
index 0000000..c904d73
--- /dev/null
@@ -0,0 +1,858 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposed may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller need to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+       /* protects driver- and pd structures. Always take in read mode
+        * before taking the page table spinlock.
+        */
+       struct rw_semaphore sem;
+
+       /* protects page tables, directory tables and pt tables.
+        * and pt structures.
+        */
+       spinlock_t lock;
+
+       atomic_t needs_tlbflush; /* set when PDE updates require a TLB flush */
+
+       uint8_t __iomem *register_map; /* SGX register space (saved, not used here) */
+       struct psb_mmu_pd *default_pd; /* directory shared by default contexts */
+       /*uint32_t bif_ctrl;*/
+       int has_clflush; /* non-zero if CPU has CLFLUSH (probed in psb_mmu_driver_init) */
+       int clflush_add; /* stride between successive clflush targets */
+       unsigned long clflush_mask; /* ~(clflush_add - 1): aligns addresses to flush stride */
+
+       struct drm_psb_private *dev_priv;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt { /* one 1024-entry page table */
+       struct psb_mmu_pd *pd; /* owning directory */
+       uint32_t index; /* PDE slot this table occupies in pd */
+       uint32_t count; /* valid PTEs; table is freed when it drops to 0 */
+       struct page *p; /* backing page holding the hardware PTEs */
+       uint32_t *v; /* kmap_atomic mapping of p while map-locked */
+};
+
+struct psb_mmu_pd { /* page directory plus software shadow state */
+       struct psb_mmu_driver *driver;
+       int hw_context; /* hardware context id, or -1 when unbound */
+       struct psb_mmu_pt **tables; /* 1024 shadow PT pointers (vmalloc'd) */
+       struct page *p; /* backing page holding the hardware PDEs */
+       struct page *dummy_pt; /* PT that invalid PDEs point at */
+       struct page *dummy_page; /* page that invalid PTEs point at */
+       uint32_t pd_mask; /* bits OR'd into each PDE (PSB_PTE_VALID) */
+       uint32_t invalid_pde; /* PDE value for unmapped directory slots */
+       uint32_t invalid_pte; /* PTE value for unmapped table slots */
+};
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset) /* PTE slot within one page table */
+{
+       return (offset >> PSB_PTE_SHIFT) & 0x3FF; /* 1024 entries per table */
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset) /* PDE slot in the page directory */
+{
+       return offset >> PSB_PDE_SHIFT;
+}
+
+static inline void psb_clflush(void *addr) /* flush one CPU cache line back to memory */
+{
+       __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+                                  void *addr) /* fenced clflush; no-op if CPU lacks clflush */
+{
+       if (!driver->has_clflush)
+               return;
+
+       mb(); /* order prior PTE writes before the flush */
+       psb_clflush(addr);
+       mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page) /* flush every cache line of one page */
+{
+       uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT; /* per-page stride; see psb_mmu_driver_init -- TODO confirm scaling */
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       int i;
+       uint8_t *clf;
+
+       clf = kmap_atomic(page, KM_USER0); /* per-CPU slot; caller context must not re-enter */
+       mb();
+       for (i = 0; i < clflush_count; ++i) {
+               psb_clflush(clf);
+               clf += clflush_add;
+       }
+       mb();
+       kunmap_atomic(clf, KM_USER0);
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver,
+                               struct page *page[], unsigned long num_pages) /* flush an array of pages; no-op without clflush */
+{
+       int i;
+
+       if (!driver->has_clflush)
+               return ;
+
+       for (i = 0; i < num_pages; i++)
+               psb_page_clflush(driver, *page++);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+                                   int force) /* caller holds driver->sem for writing; 'force' currently unused */
+{
+       atomic_set(&driver->needs_tlbflush, 0); /* clear the pending-flush request */
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) /* locked wrapper around the above */
+{
+       down_write(&driver->sem);
+       psb_mmu_flush_pd_locked(driver, force);
+       up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) /* serializes with PD updates; no hardware flush issued here */
+{
+       if (rc_prot)
+               down_write(&driver->sem);
+       if (rc_prot)
+               up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) /* bind pd to a hw context (-1 unbinds) */
+{
+       /*ttm_tt_cache_flush(&pd->p, 1);*/
+       psb_pages_clflush(pd->driver, &pd->p, 1); /* make the directory page visible to the device */
+       down_write(&pd->driver->sem);
+       wmb();
+       psb_mmu_flush_pd_locked(pd->driver, 1);
+       pd->hw_context = hw_context;
+       up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+                                           unsigned long end) /* end of the PDE span containing addr, clamped to end */
+{
+
+       addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+       return (addr < end) ? addr : end;
+}
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) /* build a PTE from pfn + memory-type flags */
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                   int trap_pagefaults, int invalid_type) /* allocate an empty page directory; NULL on OOM */
+{
+       struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+       uint32_t *v;
+       int i;
+
+       if (!pd)
+               return NULL;
+
+       pd->p = alloc_page(GFP_DMA32); /* the directory page itself */
+       if (!pd->p)
+               goto out_err1;
+       pd->dummy_pt = alloc_page(GFP_DMA32); /* target of invalid PDEs */
+       if (!pd->dummy_pt)
+               goto out_err2;
+       pd->dummy_page = alloc_page(GFP_DMA32); /* target of invalid PTEs */
+       if (!pd->dummy_page)
+               goto out_err3;
+
+       if (!trap_pagefaults) { /* stray accesses resolve to the dummies instead of faulting */
+               pd->invalid_pde =
+                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                    invalid_type);
+               pd->invalid_pte =
+                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                    invalid_type);
+       } else { /* zero entries: device faults on unmapped access */
+               pd->invalid_pde = 0;
+               pd->invalid_pte = 0;
+       }
+
+       v = kmap(pd->dummy_pt); /* fill the dummy PT entirely with invalid PTEs */
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pte;
+
+       kunmap(pd->dummy_pt);
+
+       v = kmap(pd->p); /* fill the directory entirely with invalid PDEs */
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pde;
+
+       kunmap(pd->p);
+
+       clear_page(kmap(pd->dummy_page));
+       kunmap(pd->dummy_page);
+
+       pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); /* zeroed shadow PT array */
+       if (!pd->tables)
+               goto out_err4;
+
+       pd->hw_context = -1; /* not bound to hardware yet */
+       pd->pd_mask = PSB_PTE_VALID;
+       pd->driver = driver;
+
+       return pd;
+
+out_err4:
+       __free_page(pd->dummy_page);
+out_err3:
+       __free_page(pd->dummy_pt);
+out_err2:
+       __free_page(pd->p);
+out_err1:
+       kfree(pd);
+       return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt) /* release one page table and its backing page */
+{
+       __free_page(pt->p);
+       kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) /* tear down a directory and every table under it */
+{
+       struct psb_mmu_driver *driver = pd->driver;
+       struct psb_mmu_pt *pt;
+       int i;
+
+       down_write(&driver->sem);
+       if (pd->hw_context != -1)
+               psb_mmu_flush_pd_locked(driver, 1); /* drop pending flush state for a live context */
+
+       /* Should take the spinlock here, but we don't need to do that
+          since we have the semaphore in write mode. */
+
+       for (i = 0; i < 1024; ++i) {
+               pt = pd->tables[i];
+               if (pt)
+                       psb_mmu_free_pt(pt);
+       }
+
+       vfree(pd->tables);
+       __free_page(pd->dummy_page);
+       __free_page(pd->dummy_pt);
+       __free_page(pd->p);
+       kfree(pd);
+       up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) /* allocate a PT pre-filled with invalid PTEs; NULL on OOM */
+{
+       struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+       void *v;
+       uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; /* see psb_page_clflush -- TODO confirm scaling */
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       spinlock_t *lock = &pd->driver->lock;
+       uint8_t *clf;
+       uint32_t *ptes;
+       int i;
+
+       if (!pt)
+               return NULL;
+
+       pt->p = alloc_page(GFP_DMA32);
+       if (!pt->p) {
+               kfree(pt);
+               return NULL;
+       }
+
+       spin_lock(lock); /* kmap_atomic slot use must stay inside the lock (see file header) */
+
+       v = kmap_atomic(pt->p, KM_USER0);
+       clf = (uint8_t *) v;
+       ptes = (uint32_t *) v;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               *ptes++ = pd->invalid_pte; /* start fully unmapped */
+
+
+       if (pd->driver->has_clflush && pd->hw_context != -1) { /* live context: push the new PT to memory */
+               mb();
+               for (i = 0; i < clflush_count; ++i) {
+                       psb_clflush(clf);
+                       clf += clflush_add;
+               }
+               mb();
+       }
+
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(lock);
+
+       pt->count = 0; /* no valid PTEs yet */
+       pt->pd = pd;
+       pt->index = 0; /* real PDE slot is set by the caller */
+
+       return pt;
+}
+
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+                                            unsigned long addr) /* get-or-create PT for addr; returns with driver->lock held and pt->v mapped, or NULL on OOM (lock dropped) */
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       uint32_t *v;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       while (!pt) {
+               spin_unlock(lock); /* allocation may sleep; drop the lock first */
+               pt = psb_mmu_alloc_pt(pd);
+               if (!pt)
+                       return NULL;
+               spin_lock(lock);
+
+               if (pd->tables[index]) { /* lost the race: discard ours, use the winner's */
+                       spin_unlock(lock);
+                       psb_mmu_free_pt(pt);
+                       spin_lock(lock);
+                       pt = pd->tables[index];
+                       continue;
+               }
+
+               v = kmap_atomic(pd->p, KM_USER0); /* install the new PDE */
+               pd->tables[index] = pt;
+               v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+               pt->index = index;
+               kunmap_atomic((void *) v, KM_USER0);
+
+               if (pd->hw_context != -1) { /* device may cache PDEs: flush + request TLB flush */
+                       psb_mmu_clflush(pd->driver, (void *) &v[index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0); /* unmapped by psb_mmu_pt_unmap_unlock() */
+       return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+                                             unsigned long addr) /* like the above, but NULL (lock dropped) if no PT exists */
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       if (!pt) {
+               spin_unlock(lock);
+               return NULL;
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0);
+       return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) /* undo a *_map_lock(); frees the PT if it became empty */
+{
+       struct psb_mmu_pd *pd = pt->pd;
+       uint32_t *v;
+
+       kunmap_atomic(pt->v, KM_USER0);
+       if (pt->count == 0) { /* last PTE gone: unhook the PDE and free the table */
+               v = kmap_atomic(pd->p, KM_USER0);
+               v[pt->index] = pd->invalid_pde;
+               pd->tables[pt->index] = NULL;
+
+               if (pd->hw_context != -1) {
+                       psb_mmu_clflush(pd->driver,
+                                       (void *) &v[pt->index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+               kunmap_atomic(pt->v, KM_USER0); /* NOTE(review): mapping made above is 'v'; pt->v was already unmapped -- confirm this should not be kunmap_atomic(v, ...) */
+               spin_unlock(&pd->driver->lock);
+               psb_mmu_free_pt(pt);
+               return;
+       }
+       spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+                                  unsigned long addr, uint32_t pte) /* write one PTE; pt must be map-locked */
+{
+       pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+                                         unsigned long addr) /* reset one PTE to the invalid value */
+{
+       pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+                       uint32_t mmu_offset, uint32_t gtt_start,
+                       uint32_t gtt_pages) /* point PDEs at mmu_offset straight at GTT page addresses */
+{
+       uint32_t *v;
+       uint32_t start = psb_mmu_pd_index(mmu_offset);
+       struct psb_mmu_driver *driver = pd->driver;
+       int num_pages = gtt_pages;
+
+       down_read(&driver->sem);
+       spin_lock(&driver->lock);
+
+       v = kmap_atomic(pd->p, KM_USER0);
+       v += start;
+
+       while (gtt_pages--) { /* one PDE per GTT page, 4K apart */
+               *v++ = gtt_start | pd->pd_mask;
+               gtt_start += PAGE_SIZE;
+       }
+
+       /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+       psb_pages_clflush(pd->driver, &pd->p, num_pages); /* NOTE(review): passes the single directory page with count num_pages -- confirm intent */
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(&driver->lock);
+
+       if (pd->hw_context != -1)
+               atomic_set(&pd->driver->needs_tlbflush, 1);
+
+       up_read(&pd->driver->sem);
+       psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) /* default directory; pointer is stable so no locking */
+{
+       struct psb_mmu_pd *pd;
+
+       /* down_read(&driver->sem); */
+       pd = driver->default_pd;
+       /* up_read(&driver->sem); */
+
+       return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+       struct psb_mmu_pd *pd;
+
+       pd = psb_mmu_get_default_pd(driver);
+       return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) /* free the default directory and the driver itself */
+{
+       psb_mmu_free_pagedir(driver->default_pd);
+       kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                       int trap_pagefaults,
+                                       int invalid_type,
+                                       struct drm_psb_private *dev_priv) /* create the MMU driver + default PD; NULL on OOM */
+{
+       struct psb_mmu_driver *driver;
+
+       driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+       if (!driver)
+               return NULL;
+       driver->dev_priv = dev_priv;
+
+       driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+                                             invalid_type);
+       if (!driver->default_pd)
+               goto out_err1;
+
+       spin_lock_init(&driver->lock);
+       init_rwsem(&driver->sem);
+       down_write(&driver->sem); /* held while the CPU-feature probe fills in clflush fields */
+       driver->register_map = registers;
+       atomic_set(&driver->needs_tlbflush, 1);
+
+       driver->has_clflush = 0;
+
+       if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+               uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+               /*
+                * clflush size is determined at kernel setup for x86_64
+                *  but not for i386. We have to do it here.
+                */
+
+               cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+               clflush_size = ((misc >> 8) & 0xff) * 8; /* CPUID.01H:EBX[15:8] * 8 = line size in bytes */
+               driver->has_clflush = 1;
+               driver->clflush_add =
+                   PAGE_SIZE * clflush_size / sizeof(uint32_t);
+               driver->clflush_mask = driver->clflush_add - 1;
+               driver->clflush_mask = ~driver->clflush_mask; /* mask that rounds down to flush-stride alignment */
+       }
+
+       up_write(&driver->sem);
+       return driver;
+
+out_err1:
+       kfree(driver);
+       return NULL;
+}
+
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+                              unsigned long address, uint32_t num_pages,
+                              uint32_t desired_tile_stride,
+                              uint32_t hw_tile_stride) /* clflush the PTE slots covering a (possibly tiled) range */
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long clflush_add = pd->driver->clflush_add;
+       unsigned long clflush_mask = pd->driver->clflush_mask;
+
+       if (!pd->driver->has_clflush) {
+               /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+               psb_pages_clflush(pd->driver, &pd->p, num_pages); /* no-op here too: guarded by has_clflush */
+               return;
+       }
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages; /* untiled: one row covering everything */
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+       mb();
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end); /* walk one PT at a time */
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue; /* no PT here: nothing cached to flush */
+                       do {
+                               psb_clflush(&pt->v
+                                           [psb_mmu_pt_index(addr)]);
+                       } while (addr +=
+                                clflush_add,
+                                (addr & clflush_mask) < next); /* step one cache line of PTEs at a time */
+
+                       psb_mmu_pt_unmap_unlock(pt);
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       mb();
+}
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                unsigned long address, uint32_t num_pages) /* invalidate PTEs for a linear range */
+{
+       struct psb_mmu_pt *pt;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address; /* kept for the flush at the end */
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr); /* alloc variant: ensures a PT exists to hold invalid PTEs */
+               if (!pt)
+                       goto out;
+               do {
+                       psb_mmu_invalidate_pte(pt, addr);
+                       --pt->count; /* empty tables are freed by unmap_unlock */
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 0);
+
+       return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+                         uint32_t num_pages, uint32_t desired_tile_stride,
+                         uint32_t hw_tile_stride) /* invalidate PTEs for a tiled range; skips absent PTs */
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address; /* kept for the flush at the end */
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages; /* untiled: one row */
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       /* down_read(&pd->driver->sem); */
+
+       /* Make sure we only need to flush this processor's cache */
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue; /* no PT: range already unmapped */
+                       do {
+                               psb_mmu_invalidate_pte(pt, addr);
+                               --pt->count;
+
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       /* up_read(&pd->driver->sem); */
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+                               unsigned long address, uint32_t num_pages,
+                               int type) /* map a contiguous pfn range; 0 or -ENOMEM (partial work is still flushed) */
+{
+       struct psb_mmu_pt *pt;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address; /* kept for the flush at the end */
+       int ret = 0;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt) {
+                       ret = -ENOMEM;
+                       goto out; /* earlier PTEs remain mapped */
+               }
+               do {
+                       pte = psb_mmu_mask_pte(start_pfn++, type);
+                       psb_mmu_set_pte(pt, addr, pte);
+                       pt->count++;
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                        unsigned long address, uint32_t num_pages,
+                        uint32_t desired_tile_stride,
+                        uint32_t hw_tile_stride, int type) /* map an array of pages, optionally tiled; 0, -EINVAL or -ENOMEM */
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address; /* kept for the flush at the end */
+       int ret = 0;
+
+       if (hw_tile_stride) {
+               if (num_pages % desired_tile_stride != 0)
+                       return -EINVAL; /* tiling requires whole rows */
+               rows = num_pages / desired_tile_stride;
+       } else {
+               desired_tile_stride = num_pages;
+       }
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       down_read(&pd->driver->sem);
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+                       if (!pt) {
+                               ret = -ENOMEM;
+                               goto out; /* earlier PTEs remain mapped */
+                       }
+                       do {
+                               pte =
+                                   psb_mmu_mask_pte(page_to_pfn(*pages++),
+                                                    type);
+                               psb_mmu_set_pte(pt, addr, pte);
+                               pt->count++;
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+
+               address += row_add;
+       }
+out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                          unsigned long *pfn) /* translate an MMU virtual address; 0 on success, -EINVAL if unmapped */
+{
+       int ret;
+       struct psb_mmu_pt *pt;
+       uint32_t tmp;
+       spinlock_t *lock = &pd->driver->lock;
+
+       down_read(&pd->driver->sem);
+       pt = psb_mmu_pt_map_lock(pd, virtual);
+       if (!pt) { /* no PT: check whether the PDE is the dummy mapping */
+               uint32_t *v;
+
+               spin_lock(lock);
+               v = kmap_atomic(pd->p, KM_USER0);
+               tmp = v[psb_mmu_pd_index(virtual)];
+               kunmap_atomic(v, KM_USER0);
+               spin_unlock(lock);
+
+               if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+                   !(pd->invalid_pte & PSB_PTE_VALID)) {
+                       ret = -EINVAL; /* not the dummy PDE, or dummies not valid */
+                       goto out;
+               }
+               ret = 0;
+               *pfn = pd->invalid_pte >> PAGE_SHIFT; /* resolves to the dummy page */
+               goto out;
+       }
+       tmp = pt->v[psb_mmu_pt_index(virtual)];
+       if (!(tmp & PSB_PTE_VALID)) {
+               ret = -EINVAL;
+       } else {
+               ret = 0;
+               *pfn = tmp >> PAGE_SHIFT;
+       }
+       psb_mmu_pt_unmap_unlock(pt);
+out:
+       up_read(&pd->driver->sem);
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644 (file)
index 0000000..2da1f36
--- /dev/null
@@ -0,0 +1,252 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_vbt {
+       s8 signature[4];        /*4 bytes,"$GCT" */
+       u8 revision;
+       u8 size;
+       u8 checksum;
+       void *oaktrail_gct;
+} __packed;
+
+struct oaktrail_timing_info {
+       u16 pixel_clock;
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_offset_lo;
+       u8 hsync_pulse_width_lo;
+       u8 vsync_pulse_width_lo:4;
+       u8 vsync_offset_lo:4;
+       u8 vsync_pulse_width_hi:2;
+       u8 vsync_offset_hi:2;
+       u8 hsync_pulse_width_hi:2;
+       u8 hsync_offset_hi:2;
+       u8 width_mm_lo;
+       u8 height_mm_lo;
+       u8 height_mm_hi:4;
+       u8 width_mm_hi:4;
+       u8 hborder;
+       u8 vborder;
+       u8 unknown0:1;
+       u8 hsync_positive:1;
+       u8 vsync_positive:1;
+       u8 separate_sync:2;
+       u8 stereo:1;
+       u8 unknown6:1;
+       u8 interlaced:1;
+} __packed;
+
+struct gct_r10_timing_info {
+       u16 pixel_clock;
+       u32 hactive_lo:8;
+       u32 hactive_hi:4;
+       u32 hblank_lo:8;
+       u32 hblank_hi:4;
+       u32 hsync_offset_lo:8;
+       u16 hsync_offset_hi:2;
+       u16 hsync_pulse_width_lo:8;
+       u16 hsync_pulse_width_hi:2;
+       u16 hsync_positive:1;
+       u16 rsvd_1:3;
+       u8  vactive_lo:8;
+       u16 vactive_hi:4;
+       u16 vblank_lo:8;
+       u16 vblank_hi:4;
+       u16 vsync_offset_lo:4;
+       u16 vsync_offset_hi:2;
+       u16 vsync_pulse_width_lo:4;
+       u16 vsync_pulse_width_hi:2;
+       u16 vsync_positive:1;
+       u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+       u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+                               /* 0x61190 if MIPI */
+       u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+                                               /* Register 0x61210 */
+       struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+       u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+                               /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+                       /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+       u16 Panel_MIPI_Display_Descriptor;
+                       /*16 bits, Defined as follows: */
+                       /* if MIPI, 0x0000 if LVDS */
+                       /* Bit 0, Type, 2 bits, */
+                       /* 0: Type-1, */
+                       /* 1: Type-2, */
+                       /* 2: Type-3, */
+                       /* 3: Type-4 */
+                       /* Bit 2, Pixel Format, 4 bits */
+                       /* Bit0: 16bpp (not supported in LNC), */
+                       /* Bit1: 18bpp loosely packed, */
+                       /* Bit2: 18bpp packed, */
+                       /* Bit3: 24bpp */
+                       /* Bit 6, Reserved, 2 bits, 00b */
+                       /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+                       /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+       u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+                               /* 0x61190 if MIPI */
+       u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+                                               /* Register 0x61210 */
+       struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+       u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+                               /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+       u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+                       /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+       u16 Panel_MIPI_Display_Descriptor;
+                       /*16 bits, Defined as follows: */
+                       /* if MIPI, 0x0000 if LVDS */
+                       /* Bit 0, Type, 2 bits, */
+                       /* 0: Type-1, */
+                       /* 1: Type-2, */
+                       /* 2: Type-3, */
+                       /* 3: Type-4 */
+                       /* Bit 2, Pixel Format, 4 bits */
+                       /* Bit0: 16bpp (not supported in LNC), */
+                       /* Bit1: 18bpp loosely packed, */
+                       /* Bit2: 18bpp packed, */
+                       /* Bit3: 24bpp */
+                       /* Bit 6, Reserved, 2 bits, 00b */
+                       /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+                       /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+       struct {
+               u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+                       /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+               u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+               /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+               u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+                                       /* 1: Burst and non-burst */
+                                       /* 2/3: Reserved */
+               u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+               u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+               u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+               u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+               u16 Rsvd:5;/*5 bits,00000b */
+       } panelrx;
+       u16 panel_receiver;
+} __packed;
+
+struct oaktrail_gct_v1 {
+       union { /*8 bits,Defined as follows: */
+               struct {
+                       u8 PanelType:4; /*4 bits, Bit field for panels*/
+                                       /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+                                       /*2 bits,Specifies which of the*/
+                       u8 BootPanelIndex:2;
+                                       /* 4 panels to use by default*/
+                       u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+                                       /* the 4 MIPI DSI receivers to use*/
+               } PD;
+               u8 PanelDescriptor;
+       };
+       struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+       union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_v2 {
+       union { /*8 bits,Defined as follows: */
+               struct {
+                       u8 PanelType:4; /*4 bits, Bit field for panels*/
+                                       /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+                                       /*2 bits,Specifies which of the*/
+                       u8 BootPanelIndex:2;
+                                       /* 4 panels to use by default*/
+                       u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+                                       /* the 4 MIPI DSI receivers to use*/
+               } PD;
+               u8 PanelDescriptor;
+       };
+       struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+       union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_data {
+       u8 bpi; /* boot panel index, number of panel used during boot */
+       u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+       struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+       u32 Panel_Port_Control;
+       u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u32 PP_Cycle_Delay;
+       u16 Panel_Backlight_Inverter_Descriptor;
+       u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC           0x1
+#define MODE_SETTING_IN_ENCODER                0x2
+#define MODE_SETTING_ON_GOING          0x3
+#define MODE_SETTING_IN_DSR            0x4
+#define MODE_SETTING_ENCODER_DONE      0x8
+
+#define GCT_R10_HEADER_SIZE            16
+#define GCT_R10_DISPLAY_DESC_SIZE      28
+
+/*
+ *     Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+       struct pci_dev *dev;
+       void __iomem *regs;
+       unsigned int mmio, mmio_len;
+       int dpms_mode;
+       struct hdmi_i2c_dev *i2c_dev;
+
+       /* register state */
+       u32 saveDPLL_CTRL;
+       u32 saveDPLL_DIV_CTRL;
+       u32 saveDPLL_ADJUST;
+       u32 saveDPLL_UPDATE;
+       u32 saveDPLL_CLK_ENABLE;
+       u32 savePCH_HTOTAL_B;
+       u32 savePCH_HBLANK_B;
+       u32 savePCH_HSYNC_B;
+       u32 savePCH_VTOTAL_B;
+       u32 savePCH_VBLANK_B;
+       u32 savePCH_VSYNC_B;
+       u32 savePCH_PIPEBCONF;
+       u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int  oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644 (file)
index 0000000..9d12a3e
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_range_t {
+       int min, max;
+};
+
+struct oaktrail_limit_t {
+       struct psb_intel_range_t dot, m, p1;
+};
+
+struct oaktrail_clock_t {
+       /* derived values */
+       int dot;
+       int m;
+       int p1;
+};
+
+#define MRST_LIMIT_LVDS_100L       0
+#define MRST_LIMIT_LVDS_83         1
+#define MRST_LIMIT_LVDS_100        2
+
+#define MRST_DOT_MIN             19750
+#define MRST_DOT_MAX             120000
+#define MRST_M_MIN_100L                    20
+#define MRST_M_MIN_100             10
+#define MRST_M_MIN_83              12
+#define MRST_M_MAX_100L                    34
+#define MRST_M_MAX_100             17
+#define MRST_M_MAX_83              20
+#define MRST_P1_MIN                2
+#define MRST_P1_MAX_0              7
+#define MRST_P1_MAX_1              8
+
+static const struct oaktrail_limit_t oaktrail_limits[] = {
+       {                       /* MRST_LIMIT_LVDS_100L */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+        },
+       {                       /* MRST_LIMIT_LVDS_83 */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+        },
+       {                       /* MRST_LIMIT_LVDS_100 */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+        },
+};
+
+#define MRST_M_MIN         10
+static const u32 oaktrail_m_converts[] = {
+       0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+       0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+       0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+{
+       const struct oaktrail_limit_t *limit = NULL;
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+           || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+               switch (dev_priv->core_freq) {
+               case 100:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+                       break;
+               case 166:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+                       break;
+               case 200:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+                       break;
+               }
+       } else {
+               limit = NULL;
+               dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+       }
+
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+{
+       clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+{
+       pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
+            prefix, clock->dot, clock->m, clock->p1);
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given refclk,
+ * or FALSE.  Divisor values are the actual divisors for
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+               struct oaktrail_clock_t *best_clock)
+{
+       struct oaktrail_clock_t clock;
+       const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+       int err = target;
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+               for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+                    clock.p1++) {
+                       int this_err;
+
+                       oaktrail_clock(refclk, &clock);
+
+                       this_err = abs(clock.dot - target);
+                       if (this_err < err) {
+                               *best_clock = clock;
+                               err = this_err;
+                       }
+               }
+       }
+       dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+       return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                  if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+               /* Wait for the pipe disable to take effect. */
+               psb_intel_wait_for_vblank(dev);
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+       /*Set FIFO Watermarks*/
+       REG_WRITE(DSPARB, 0x3FFF);
+       REG_WRITE(DSPFW1, 0x3F88080A);
+       REG_WRITE(DSPFW2, 0x0b060808);
+       REG_WRITE(DSPFW3, 0x0);
+       REG_WRITE(DSPFW4, 0x08030404);
+       REG_WRITE(DSPFW5, 0x04040404);
+       REG_WRITE(DSPFW6, 0x78);
+       REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+       /* Must write Bit 14 of the Chicken Bit Register */
+
+       gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+                             struct drm_display_mode *mode,
+                             struct drm_display_mode *adjusted_mode,
+                             int x, int y,
+                             struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int pipe = psb_intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk = 0;
+       struct oaktrail_clock_t clock;
+       u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       bool is_mipi = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct psb_intel_encoder *psb_intel_encoder = NULL;
+       uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+       struct drm_connector *connector;
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       memcpy(&psb_intel_crtc->saved_mode,
+               mode,
+               sizeof(struct drm_display_mode));
+       memcpy(&psb_intel_crtc->saved_adjusted_mode,
+               adjusted_mode,
+               sizeof(struct drm_display_mode));
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               if (!connector->encoder || connector->encoder->crtc != crtc)
+                       continue;
+
+               psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               case INTEL_OUTPUT_MIPI:
+                       is_mipi = true;
+                       break;
+               }
+       }
+
+       /* Disable the VGA plane that we never use */
+       REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (oaktrail_panel_fitter_pipe(dev) == pipe)
+               REG_WRITE(PFIT_CONTROL, 0);
+
+       REG_WRITE(pipesrc_reg,
+                 ((mode->crtc_hdisplay - 1) << 16) |
+                 (mode->crtc_vdisplay - 1));
+
+       if (psb_intel_encoder)
+               drm_connector_property_get_value(connector,
+                       dev->mode_config.scaling_mode_property, &scalingType);
+
+       if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+               /* Moorestown doesn't have register support for centering so
+                * we need to mess with the h/vblank and h/vsync start and
+                * ends to get centering */
+               int offsetX = 0, offsetY = 0;
+
+               offsetX = (adjusted_mode->crtc_hdisplay -
+                          mode->crtc_hdisplay) / 2;
+               offsetY = (adjusted_mode->crtc_vdisplay -
+                          mode->crtc_vdisplay) / 2;
+
+               REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+                       ((adjusted_mode->crtc_htotal - 1) << 16));
+               REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+                       ((adjusted_mode->crtc_vtotal - 1) << 16));
+               REG_WRITE(hblank_reg,
+                       (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+                       ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+               REG_WRITE(hsync_reg,
+                       (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+                       ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+               REG_WRITE(vblank_reg,
+                       (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+                       ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+               REG_WRITE(vsync_reg,
+                       (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+                       ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+       } else {
+               REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                       ((adjusted_mode->crtc_htotal - 1) << 16));
+               REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                       ((adjusted_mode->crtc_vtotal - 1) << 16));
+               REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                       ((adjusted_mode->crtc_hblank_end - 1) << 16));
+               REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                       ((adjusted_mode->crtc_hsync_end - 1) << 16));
+               REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                       ((adjusted_mode->crtc_vblank_end - 1) << 16));
+               REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                       ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       }
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs =
+                   crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+       dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+
+       if (is_mipi)
+               goto oaktrail_crtc_mode_set_exit;
+
+       refclk = dev_priv->core_freq * 1000;
+
+       dpll = 0;               /*BIT16 = 0 for 100MHz reference */
+
+       ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+       if (!ok) {
+               dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
+       } else {
+               dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
+                        "m = %x, p1 = %x.\n", clock.dot, clock.m,
+                        clock.p1);
+       }
+
+       fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+       dpll |= DPLL_VGA_MODE_DIS;
+
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       if (is_lvds)
+               dpll |= DPLLA_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+
+       if (is_sdvo) {
+               int sdvo_pixel_multiply =
+                   adjusted_mode->clock / mode->clock;
+
+               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |=
+                   (sdvo_pixel_multiply -
+                    1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+       }
+
+
+       /* compute bitmask from p1 value */
+       dpll |= (1 << (clock.p1 - 2)) << 17;
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       mrstPrintPll("chosen", &clock);
+
+       if (dpll & DPLL_VCO_ENABLE) {
+               REG_WRITE(fp_reg, fp);
+               REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+               REG_READ(dpll_reg);
+               /* Check the DPLLA lock bit PIPEACONF[29] */
+               udelay(150);
+       }
+
+       REG_WRITE(fp_reg, fp);
+       REG_WRITE(dpll_reg, dpll);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       /* write it again -- the BIOS does, after all */
+       REG_WRITE(dpll_reg, dpll);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+       psb_intel_wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+       psb_intel_wait_for_vblank(dev);
+
+oaktrail_crtc_mode_set_exit:
+       gma_power_end(dev);
+       return 0;
+}
+
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+                           int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+       int pipe = psb_intel_crtc->pipe;
+       unsigned long start, offset;
+
+       int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr;
+       int ret = 0;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               dev_dbg(dev->dev, "No FB bound\n");
+               return 0;
+       }
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       start = psbfb->gtt->offset;
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown color depth\n");
+               ret = -EINVAL;
+               goto pipe_set_base_exit;
+       }
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       REG_WRITE(dspbase, offset);
+       REG_READ(dspbase);
+       REG_WRITE(dspsurf, start);
+       REG_READ(dspsurf);
+
+pipe_set_base_exit:
+       gma_power_end(dev);
+       return ret;
+}
+
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+       .dpms = oaktrail_crtc_dpms,
+       .mode_fixup = oaktrail_crtc_mode_fixup,
+       .mode_set = oaktrail_crtc_mode_set,
+       .mode_set_base = oaktrail_pipe_set_base,
+       .prepare = oaktrail_crtc_prepare,
+       .commit = oaktrail_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644 (file)
index 0000000..63aea2f
--- /dev/null
@@ -0,0 +1,512 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+/*
+ * Register the display outputs present on this board: LVDS when the
+ * firmware enabled it (DSI is not implemented here and only logs an
+ * error), plus HDMI when an HDMI private block was probed.
+ * Always returns 0 — the DSI-unsupported path is not treated as fatal.
+ */
+static int oaktrail_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->iLVDS_enable)
+               oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+       else
+               dev_err(dev->dev, "DSI is not supported\n");
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+       return 0;
+}
+
+/*
+ *     Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/* PWM tuning constants for the Moorestown/Oaktrail backlight controller */
+#define MRST_BLC_MAX_PWM_REG_FREQ          0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+/* Single backlight device per driver; cached for the ops callbacks below */
+static struct backlight_device *oaktrail_backlight_device;
+/* Last brightness percentage programmed; returned by get_brightness */
+static int oaktrail_brightness;
+
+/*
+ * Backlight ->update_status callback. Scales the requested percentage
+ * (clamped to a 1..100 floor) by the hardware PWM maximum and the two
+ * driver adjustment factors (blc_adj1/blc_adj2, also percentages), then
+ * programs BLC_PWM_CTL. If the display island cannot be powered up the
+ * write is skipped but the level is still cached.
+ *
+ * NOTE(review): reads bl_get_data() from the file-scope device rather
+ * than from @bd — presumably the same object; verify if a second
+ * backlight is ever registered.
+ */
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int level = bd->props.brightness;
+       u32 blc_pwm_ctl;
+       u32 max_pwm_blc;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+
+       if (gma_power_begin(dev, 0)) {
+               /* Calculate and set the brightness value */
+               max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+               blc_pwm_ctl = level * max_pwm_blc / 100;
+
+               /* Adjust the backlight level with the percent in
+                * dev_priv->blc_adj1;
+                */
+               blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+               blc_pwm_ctl = blc_pwm_ctl / 100;
+
+               /* Adjust the backlight level with the percent in
+                * dev_priv->blc_adj2;
+                */
+               blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+               blc_pwm_ctl = blc_pwm_ctl / 100;
+
+               /* force PWM bit on */
+               REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+               REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+               gma_power_end(dev);
+       }
+       oaktrail_brightness = level;
+       return 0;
+}
+
+/* Backlight ->get_brightness callback: report the cached software level. */
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return oaktrail_brightness;
+}
+
+/*
+ * One-time hardware init for the backlight PWM: derive the PWM period
+ * from the core clock and program it into both halves of BLC_PWM_CTL.
+ * Returns -ERANGE if the computed value exceeds the 16-bit register
+ * limit, 0 otherwise (including when the island could not be powered).
+ *
+ * NOTE(review): value is multiplied and then divided by
+ * blc_pwm_precision_factor, which cancels out exactly — presumably a
+ * leftover from a fixed-point variant of this formula; confirm intent.
+ */
+static int device_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+       dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+       bl_max_freq = 256;
+       /* this needs to be set elsewhere */
+       blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       /* NOTE(review): cast is wider than needed (value is 32-bit) and the
+        * return is double-indented — cosmetic only */
+       if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+                       return -ERANGE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+               REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+               gma_power_end(dev);
+       }
+       return 0;
+}
+
+/* Backlight class operations for the Oaktrail backlight device */
+static const struct backlight_ops oaktrail_ops = {
+       .get_brightness = oaktrail_get_brightness,
+       .update_status  = oaktrail_set_brightness,
+};
+
+/*
+ * Register the "oaktrail-bl" backlight class device, initialise the PWM
+ * hardware and push an initial 100% level. On device_backlight_init()
+ * failure the class device is unregistered again and the error returned;
+ * on success the device is cached in dev_priv->backlight_device.
+ */
+int oaktrail_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+                               NULL, (void *)dev, &oaktrail_ops, &props);
+
+       if (IS_ERR(oaktrail_backlight_device))
+               return PTR_ERR(oaktrail_backlight_device);
+
+       ret = device_backlight_init(dev);
+       if (ret < 0) {
+               backlight_device_unregister(oaktrail_backlight_device);
+               return ret;
+       }
+       /* Start at full brightness and sync the hardware state */
+       oaktrail_backlight_device->props.brightness = 100;
+       oaktrail_backlight_device->props.max_brightness = 100;
+       backlight_update_status(oaktrail_backlight_device);
+       dev_priv->backlight_device = oaktrail_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Moorestown specific chip logic and low level methods
+ *     for power management
+ */
+
+static void oaktrail_init_pm(struct drm_device *dev)
+{
+       /* Intentionally empty: no Oaktrail-specific PM setup is required,
+        * but the ->init_pm hook is kept so callers need not NULL-check */
+}
+
+/**
+ *     oaktrail_save_display_registers -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend. Also powers the panel, plane, pipe and
+ *     PLL down when LVDS is in use. Always returns 0.
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int i;
+       u32 pp_stat;
+
+       /* Display arbitration control + watermarks */
+       dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+       dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+       dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+       dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+       dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+       dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+       dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+       dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+       /* Pipe & plane A info */
+       dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+       dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+       dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+       dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+       dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+       dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+       dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+       dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+       dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+       dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+       dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+       dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+       dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+       dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+       dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+       dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+       dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+       dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+
+       /* Save cursor regs */
+       dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+       dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+       dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+       /* Save palette (gamma) */
+       for (i = 0; i < 256; i++)
+               dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_save(dev);
+
+       /* Save performance state */
+       dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+       /* LVDS state */
+       dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+       dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+       dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+       dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+       dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+       dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+       dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+       dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+       dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+       dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+       /* HW overlay */
+       dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+       dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+       dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+       dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+       dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+       dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+       dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+       /* DPST registers */
+       dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+                                       PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+       dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+                                       PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+       dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+       if (dev_priv->iLVDS_enable) {
+               /* Shut down the panel */
+               PSB_WVDC32(0, PP_CONTROL);
+
+               /* NOTE(review): unbounded busy-wait — hangs if the panel
+                * power sequencer never clears bit 31; consider a timeout */
+               do {
+                       pp_stat = PSB_RVDC32(PP_STATUS);
+               } while (pp_stat & 0x80000000);
+
+               /* Turn off the plane */
+               PSB_WVDC32(0x58000000, DSPACNTR);
+               /* Trigger the plane disable */
+               PSB_WVDC32(0, DSPASURF);
+
+               /* Wait ~4 ticks */
+               msleep(4);
+
+               /* Turn off pipe */
+               PSB_WVDC32(0x0, PIPEACONF);
+               /* Wait ~8 ticks */
+               msleep(8);
+
+               /* Turn off PLLs */
+               PSB_WVDC32(0, MRST_DPLL_A);
+       }
+       return 0;
+}
+
+/**
+ *     oaktrail_restore_display_registers      -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume,
+ *     re-enabling the PLL, pipe, plane, cursor, palette, overlay and
+ *     (when in use) the LVDS panel power. Always returns 0.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pp_stat;
+       int i;
+
+       /* Display arbitration + watermarks */
+       PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+       PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+       PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+       PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+       PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+       PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+       PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+       PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+       /* Make sure VGA plane is off. it initializes to on after reset!*/
+       PSB_WVDC32(0x80000000, VGACNTRL);
+
+       /* set the plls */
+       PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+       PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+
+       /* Actually enable it */
+       PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+       DRM_UDELAY(150);
+
+       /* Restore mode */
+       PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+       PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+       PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+       PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+       PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+       PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+       PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+       PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+
+       /* Restore performance mode*/
+       PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+
+       /* Enable the pipe*/
+       if (dev_priv->iLVDS_enable)
+               PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+
+       /* Set up the plane*/
+       PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+       PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+       PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+
+       /* Enable the plane */
+       PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+       PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+
+       /* Enable Cursor A */
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+       /* Restore palette (gamma) */
+       for (i = 0; i < 256; i++)
+               PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_restore(dev);
+
+       if (dev_priv->iLVDS_enable) {
+               PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+               PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+               PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+               PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+               PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+               PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+               PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+               PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+               PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+               PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+       }
+
+       /* Wait for cycle delay */
+       /* NOTE(review): both polls below are unbounded — they hang if the
+        * panel power sequencer never settles; consider adding timeouts */
+       do {
+               pp_stat = PSB_RVDC32(PP_STATUS);
+       } while (pp_stat & 0x08000000);
+
+       /* Wait for panel power up */
+       do {
+               pp_stat = PSB_RVDC32(PP_STATUS);
+       } while (pp_stat & 0x10000000);
+
+       /* Restore HW overlay */
+       PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+       /* DPST registers */
+       PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+                                               HISTOGRAM_INT_CONTROL);
+       PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+                                               HISTOGRAM_LOGIC_CONTROL);
+       PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+       return 0;
+}
+
+/**
+ *     oaktrail_power_down     -       power down the display island
+ *     @dev: our DRM device
+ *
+ *     Power down the display interface of our device: request power
+ *     gating of the display islands via the PM SSC port, then poll the
+ *     SSS status port until the hardware reports the islands gated.
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_mask ;
+       u32 pwr_sts;
+
+       pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+       outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+       /* NOTE(review): no timeout — spins forever if the PM unit never
+        * acknowledges the power-gate request */
+       while (true) {
+               pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & pwr_mask) == pwr_mask)
+                       break;
+               else
+                       udelay(10);
+       }
+       return 0;
+}
+
+/*
+ * oaktrail_power_up
+ *
+ * Restore power to the specified island(s) (powergating): clear the
+ * display bits in the PM SSC request port and poll SSS until the
+ * hardware reports the islands un-gated. Always returns 0.
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+       u32 pwr_sts, pwr_cnt;
+
+       pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+       pwr_cnt &= ~pwr_mask;
+       outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+       /* NOTE(review): unbounded poll — mirror of oaktrail_power_down;
+        * a timeout would be safer */
+       while (true) {
+               pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & pwr_mask) == 0)
+                       break;
+               else
+                       udelay(10);
+       }
+       return 0;
+}
+
+
+/*
+ * Probe-time chip setup: run the common mid-platform firmware probe and,
+ * when no VBT was found (vbt->size == 0), fall back to pulling the
+ * configuration from the OpRegion and BIOS tables instead.
+ * Returns 0 on success or the negative error from mid_chip_setup().
+ */
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+       int ret;
+
+       ret = mid_chip_setup(dev);
+       if (ret < 0)
+               return ret;
+       if (vbt->size == 0) {
+               /* Now pull the BIOS data */
+               gma_intel_opregion_init(dev);
+               psb_intel_init_bios(dev);
+       }
+       return 0;
+}
+
+/*
+ * Undo oaktrail_chip_setup(): tear down the HDMI side and, if the BIOS
+ * tables were used (no VBT), release them as well.
+ */
+static void oaktrail_teardown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+
+       oaktrail_hdmi_teardown(dev);
+       if (vbt->size == 0)
+               psb_intel_destroy_bios(dev);
+}
+
+/* Chip operations table binding the Oaktrail hooks into the psb core */
+const struct psb_ops oaktrail_chip_ops = {
+       .name = "Oaktrail",
+       .accel_2d = 1,
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = MRST_SGX_OFFSET,
+
+       .chip_setup = oaktrail_chip_setup,
+       .chip_teardown = oaktrail_teardown,
+       .crtc_helper = &oaktrail_helper_funcs,
+       .crtc_funcs = &psb_intel_crtc_funcs,
+
+       .output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = oaktrail_backlight_init,
+#endif
+
+       .init_pm = oaktrail_init_pm,
+       .save_regs = oaktrail_save_display_registers,
+       .restore_regs = oaktrail_restore_display_registers,
+       .power_down = oaktrail_power_down,
+       .power_up = oaktrail_power_up,
+
+       .i2c_bus = 1,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644 (file)
index 0000000..36878a6
--- /dev/null
@@ -0,0 +1,859 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg)         readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)   writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR       0x1000
+#define HCR_ENABLE_HDCP                (1 << 5)
+#define HCR_ENABLE_AUDIO       (1 << 2)
+#define HCR_ENABLE_PIXEL       (1 << 1)
+#define HCR_ENABLE_TMDS                (1 << 0)
+
+#define HDMI_HICR      0x1004
+#define HDMI_HSR       0x1008
+#define HDMI_HISR      0x100C
+#define HDMI_DETECT_HDP                (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN           (1 << 7)
+#define HDMI_MODE_OUTPUT       (1 << 0)
+#define HDMI_HBLANK_A  0x3100
+
+#define HDMI_AUDIO_CTRL        0x4000
+#define HDMI_ENABLE_AUDIO      (1 << 0)
+
+#define PCH_HTOTAL_B   0x3100
+#define PCH_HBLANK_B   0x3104
+#define PCH_HSYNC_B    0x3108
+#define PCH_VTOTAL_B   0x310C
+#define PCH_VBLANK_B   0x3110
+#define PCH_VSYNC_B    0x3114
+#define PCH_PIPEBSRC   0x311C
+
+#define PCH_PIPEB_DSL  0x3800
+#define PCH_PIPEB_SLC  0x3804
+#define PCH_PIPEBCONF  0x3808
+#define PCH_PIPEBSTAT  0x3824
+
+#define CDVO_DFT       0x5000
+#define CDVO_SLEWRATE  0x5004
+#define CDVO_STRENGTH  0x5008
+#define CDVO_RCOMP     0x500C
+
+#define DPLL_CTRL       0x6000
+#define DPLL_PDIV_SHIFT                16
+#define DPLL_PDIV_MASK         (0xf << 16)
+#define DPLL_PWRDN             (1 << 4)
+#define DPLL_RESET             (1 << 3)
+#define DPLL_FASTEN            (1 << 2)
+#define DPLL_ENSTAT            (1 << 1)
+#define DPLL_DITHEN            (1 << 0)
+
+#define DPLL_DIV_CTRL   0x6004
+#define DPLL_CLKF_MASK         0xffffffc0
+#define DPLL_CLKR_MASK         (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP           (1 << 31)
+#define DPLL_SEL_HDMI          (1 << 8)
+#define DPLL_EN_HDMI           (1 << 1)
+#define DPLL_EN_VGA            (1 << 0)
+
+#define DPLL_ADJUST     0x600C
+#define DPLL_STATUS     0x6010
+#define DPLL_UPDATE     0x6014
+#define DPLL_DFT        0x6020
+
+/* Inclusive [min, max] bound used for the DPLL search limits below */
+struct intel_range {
+       int     min, max;
+};
+
+/* Legal ranges for the HDMI DPLL parameters (VCO and the three dividers) */
+struct oaktrail_hdmi_limit {
+       struct intel_range vco, np, nr, nf;
+};
+
+/* One candidate DPLL configuration produced by oaktrail_hdmi_find_dpll() */
+struct oaktrail_hdmi_clock {
+       int np;
+       int nr;
+       int nf;
+       int dot;
+};
+
+#define VCO_MIN                320000
+#define VCO_MAX                1650000
+#define        NP_MIN          1
+#define        NP_MAX          15
+#define        NR_MIN          1
+#define        NR_MAX          64
+#define NF_MIN         2
+#define NF_MAX         4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+       .vco = { .min = VCO_MIN,                .max = VCO_MAX },
+       .np  = { .min = NP_MIN,                 .max = NP_MAX  },
+       .nr  = { .min = NR_MIN,                 .max = NR_MAX  },
+       .nf  = { .min = NF_MIN,                 .max = NF_MAX  },
+};
+
+/* Crude vblank wait: busy-delay one full 50 Hz frame period. */
+static void wait_for_vblank(struct drm_device *dev)
+{
+       /* FIXME: Can we do this as a sleep ? */
+       /* Wait for 20ms, i.e. one cycle at 50hz. */
+       mdelay(20);
+}
+
+/*
+ * Poll the SCU IPC busy bit (bit 0 of the status register at offset
+ * 0x04) until it clears, giving up after ~1000 iterations.
+ * NOTE(review): @scu_base is an ioremap()ed pointer — should likely be
+ * annotated void __iomem * for sparse; confirm against kernel style.
+ */
+static void scu_busy_loop(void *scu_base)
+{
+       u32 status = 0;
+       u32 loop_count = 0;
+
+       status = readl(scu_base + 0x04);
+       while (status & 1) {
+               udelay(1); /* scu processing time is in few u secods */
+               status = readl(scu_base + 0x04);
+               loop_count++;
+               /* break if scu doesn't reset busy bit after huge retry */
+               if (loop_count > 1000) {
+                       DRM_DEBUG_KMS("SCU IPC timed out");
+                       return;
+               }
+       }
+}
+
+/*
+ * Reset the HDMI controller through the SCU IPC mailbox: map the SCU
+ * MMIO window, issue an assert-reset command followed by a
+ * de-assert-reset command (each completed via scu_busy_loop()), then
+ * unmap. The register offsets and command words are raw firmware IPC
+ * values — presumably an IPC_SET/CLEAR on the HDMI reset bit; confirm
+ * against the SCU firmware interface before changing them.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+       void *base;
+       /* FIXME: at least make these defines */
+       unsigned int scu_ipc_mmio = 0xff11c000;
+       int scu_len = 1024;
+
+       base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+       if (base == NULL) {
+               DRM_ERROR("failed to map SCU mmio\n");
+               return;
+       }
+
+       /* scu ipc: assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffdf, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       /* scu ipc: de-assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffff, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       iounmap(base);
+}
+
+/*
+ * Enable HDMI audio: set HCR to 0x67 (HDCP|audio|pixel|TMDS enables per
+ * the HCR_* bits above), poke the undocumented 0x51a8 register, then set
+ * the audio-enable bit in HDMI_AUDIO_CTRL. Reads after each write flush
+ * the posted MMIO write.
+ */
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+       HDMI_WRITE(HDMI_HCR, 0x67);
+       HDMI_READ(HDMI_HCR);
+
+       HDMI_WRITE(0x51a8, 0x10);
+       HDMI_READ(0x51a8);
+
+       HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+       HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+/*
+ * Disable HDMI audio: inverse of oaktrail_hdmi_audio_enable() — clear
+ * the 0x51a8 register and the audio-enable bit, then drop HCR to 0x47
+ * (audio bit cleared). Reads flush each posted write.
+ */
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+       HDMI_WRITE(0x51a8, 0x0);
+       HDMI_READ(0x51a8);
+
+       HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+       HDMI_READ(HDMI_AUDIO_CTRL);
+
+       HDMI_WRITE(HDMI_HCR, 0x47);
+       HDMI_READ(HDMI_HCR);
+}
+
+/*
+ * DPMS for the HDMI CRTC (pipe B). OFF tears down plane -> pipe -> LNW
+ * pipe -> DPLL in order; ON/STANDBY/SUSPEND all power the path back up
+ * (DPLL -> pipes -> plane) and reload the LUT. In every mode the
+ * watermark/arbitration registers are reprogrammed at the end.
+ */
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       u32 temp;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_OFF:
+               /* Disable VGACNTRL */
+               REG_WRITE(VGACNTRL, 0x80000000);
+
+               /* Disable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+                       REG_READ(DSPBCNTR);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+
+               /* Disable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Disable LNW Pipes, etc */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+               /* wait for pipe off */
+               udelay(150);
+               /* Disable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) == 0) {
+                       REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+                       REG_WRITE(DPLL_STATUS, 0x1);
+               }
+               /* wait for dpll off */
+               udelay(150);
+               break;
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) != 0) {
+                       REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+                       temp = REG_READ(DPLL_CLK_ENABLE);
+                       REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+                       REG_READ(DPLL_CLK_ENABLE);
+               }
+               /* wait for dpll warm up */
+               udelay(150);
+
+               /* Enable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Enable LNW Pipe B */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+               wait_for_vblank(dev);
+
+               /* Enable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+               psb_intel_crtc_load_lut(crtc);
+       }
+       /* Reprogram arbitration and FIFO watermarks (raw register offsets) */
+       /* DSPARB */
+       REG_WRITE(DSPARB, 0x00003fbf);
+       /* FW1 */
+       REG_WRITE(0x70034, 0x3f880a0a);
+       /* FW2 */
+       REG_WRITE(0x70038, 0x0b060808);
+       /* FW4 */
+       REG_WRITE(0x70050, 0x08030404);
+       /* FW5 */
+       REG_WRITE(0x70054, 0x04040404);
+       /* LNC Chicken Bits */
+       REG_WRITE(0x70400, 0x4000);
+}
+
+
+/*
+ * Encoder DPMS for HDMI: write HDMI_VIDEO_REG to 0x99 for ON and 0x0
+ * for every other mode, skipping the write if the mode is unchanged.
+ *
+ * NOTE(review): dpms_mode is a function-static, so the cached state is
+ * shared by all encoders/devices and survives suspend/resume — after a
+ * resume a redundant-looking DPMS call would be incorrectly skipped.
+ * It should live in the per-device hdmi_dev instead.
+ */
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+       static int dpms_mode = -1;
+
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       u32 temp;
+
+       if (dpms_mode == mode)
+               return;
+
+       if (mode != DRM_MODE_DPMS_ON)
+               temp = 0x0;
+       else
+               temp = 0x99;
+
+       dpms_mode = mode;
+       HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+/*
+ * Compute the HTOTAL register value for the HDMI path: active width in
+ * the low half, and a total scaled by 200000/clock in the high half.
+ *
+ * NOTE(review): the first assignment to `htotal` is a dead store — the
+ * value is never used and the return recomputes the low half directly.
+ */
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+       u32 htotal, new_crtc_htotal;
+
+       htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+       /*
+        * 1024 x 768  new_crtc_htotal = 0x1024;
+        * 1280 x 1024 new_crtc_htotal = 0x0c34;
+        */
+       new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+       return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+/*
+ * Derive DPLL divider settings (np, nr, nf) for @target (kHz) given
+ * @refclk (kHz): bound np and nr by the VCO and limit table, then pick
+ * the rounded-up/nearest dividers. Results are stored pre-encoded for
+ * the registers (nr minus one, nf shifted left by 14).
+ * NOTE(review): @crtc is unused; the `dot` field of the result is never
+ * filled in.
+ */
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+                               int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+       int np_min, np_max, nr_min, nr_max;
+       int np, nr, nf;
+
+       np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+       np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+       if (np_min < oaktrail_hdmi_limit.np.min)
+               np_min = oaktrail_hdmi_limit.np.min;
+       if (np_max > oaktrail_hdmi_limit.np.max)
+               np_max = oaktrail_hdmi_limit.np.max;
+
+       nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+       nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+       if (nr_min < oaktrail_hdmi_limit.nr.min)
+               nr_min = oaktrail_hdmi_limit.nr.min;
+       if (nr_max > oaktrail_hdmi_limit.nr.max)
+               nr_max = oaktrail_hdmi_limit.nr.max;
+
+       np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+       nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+       nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+       DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+       /*
+        * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+        * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+        */
+       best_clock->np = np;
+       best_clock->nr = nr - 1;
+       best_clock->nf = (nf << 14);
+}
+
+/*
+ * Program the display controller, DPLL and HDMI unit for the given
+ * adjusted mode.  The sequence is order-sensitive: the old DPLL is
+ * powered down, the HDMI controller is reset (which requires dropping
+ * and re-establishing the MMIO mapping), the new DPLL dividers are
+ * written, and only then are the pipe/plane timing registers loaded.
+ *
+ * Returns 0 on success or -ENOMEM if the HDMI MMIO remap fails.
+ */
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode,
+                           int x, int y,
+                           struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       /* HDMI is hard-wired to pipe B; every (pipe == 0) test below
+        * therefore resolves to the *_B register of each pair. */
+       int pipe = 1;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int refclk;
+       struct oaktrail_hdmi_clock clock;
+       u32 dspcntr, pipeconf, dpll, temp;
+       int dspcntr_reg = DSPBCNTR;
+
+       /* Disable the VGA plane that we never use */
+       REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       /* XXX: Disable the panel fitter if it was on our pipe */
+
+       /* Disable dpll if necessary */
+       dpll = REG_READ(DPLL_CTRL);
+       if ((dpll & DPLL_PWRDN) == 0) {
+               REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+               REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+               REG_WRITE(DPLL_STATUS, 0x1);
+       }
+       udelay(150);
+
+       /* reset controller: FIXME - can we sort out the ioremap mess ? */
+       iounmap(hdmi_dev->regs);
+       oaktrail_hdmi_reset(dev);
+
+       /* program and enable dpll */
+       refclk = 25000;
+       oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+       /* Setting DPLL */
+       dpll = REG_READ(DPLL_CTRL);
+       dpll &= ~DPLL_PDIV_MASK;
+       dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+       REG_WRITE(DPLL_CTRL, 0x00000008);
+       REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+       REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+       REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+       REG_WRITE(DPLL_UPDATE, 0x80000000);
+       REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+       udelay(150);
+
+       hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+       if (hdmi_dev->regs == NULL) {
+               DRM_ERROR("failed to do hdmi mmio mapping\n");
+               /* NOTE(review): hdmi_dev->regs is now NULL/stale after the
+                * iounmap above; any later HDMI_READ/WRITE would fault.
+                * Confirm callers treat this return as fatal. */
+               return -ENOMEM;
+       }
+
+       /* configure HDMI */
+       HDMI_WRITE(0x1004, 0x1fd);
+       HDMI_WRITE(0x2000, 0x1);
+       HDMI_WRITE(0x2008, 0x0);
+       HDMI_WRITE(0x3130, 0x8);
+       HDMI_WRITE(0x101c, 0x1800810);
+
+       temp = htotal_calculate(adjusted_mode);
+       REG_WRITE(htot_reg, temp);
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(pipesrc_reg,
+               ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       /* Mirror the timings into the PCH-side pipe B registers */
+       REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(PCH_PIPEBSRC,
+               ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+       HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+
+       REG_WRITE(dspsize_reg,
+                       ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       /* Set up the display plane register */
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+       dspcntr |= DISPPLANE_SEL_PIPE_B;
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+       pipeconf |= PIPEACONF_ENABLE;
+
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       REG_WRITE(PCH_PIPEBCONF, pipeconf);
+       REG_READ(PCH_PIPEBCONF);
+       wait_for_vblank(dev);
+
+       /* Enable the plane only after the pipe is up */
+       REG_WRITE(dspcntr_reg, dspcntr);
+       wait_for_vblank(dev);
+
+       return 0;
+}
+
+/*
+ * Reject modes this HDMI encoder cannot drive: pixel clocks outside
+ * the 20-165 MHz window and doublescan modes.
+ */
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       int pixclk = mode->clock;
+
+       /* Pixel clock window: 20 MHz .. 165 MHz */
+       if (pixclk < 20000)
+               return MODE_CLOCK_LOW;
+       if (pixclk > 165000)
+               return MODE_CLOCK_HIGH;
+
+       /* Doublescan is not supported */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       return MODE_OK;
+}
+
+/* No fixup required: accept every mode exactly as proposed. */
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/*
+ * Report hot-plug state by sampling the HDMI status register's
+ * detect bit.
+ */
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       u32 hsr = HDMI_READ(HDMI_HSR);
+
+       DRM_DEBUG_KMS("HDMI_HSR %x\n", hsr);
+
+       return (hsr & HDMI_DETECT_HDP) ? connector_status_connected
+                                      : connector_status_disconnected;
+}
+
+/*
+ * Hard-coded 128-byte EDID block used as a fallback by
+ * oaktrail_hdmi_get_modes() (the DDC read path is commented out).
+ * The embedded monitor-name descriptor reads "DELL 2709W", so the
+ * advertised modes are that panel's -- presumably a bring-up aid.
+ */
+static const unsigned char raw_edid[] = {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+       0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+       0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+       0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+       0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+       0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+       0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+       0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+       0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+       0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+/*
+ * Populate the connector's mode list from the (currently hard-coded)
+ * EDID, then prune modes whose framebuffer would not fit in stolen
+ * memory.  Returns the number of usable modes added.
+ *
+ * Fix: i2c_get_adapter() takes a reference on the adapter; the
+ * original code never dropped it, leaking a refcount on every probe.
+ */
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct i2c_adapter *i2c_adap;
+       struct edid *edid;
+       struct drm_display_mode *mode, *t;
+       int i = 0, ret = 0;
+
+       /* Bus 3 is the HDMI DDC; it is only probed for presence here */
+       i2c_adap = i2c_get_adapter(3);
+       if (i2c_adap == NULL) {
+               DRM_ERROR("No ddc adapter available!\n");
+               edid = (struct edid *)raw_edid;
+       } else {
+               edid = (struct edid *)raw_edid;
+               /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+               /* Drop the reference taken by i2c_get_adapter() */
+               i2c_put_adapter(i2c_adap);
+       }
+
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               connector->display_info.raw_edid = NULL;
+       }
+
+       /*
+        * prune modes that require frame buffer bigger than stolen mem
+        */
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+               if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+                       i++;
+                       drm_mode_remove(connector, mode);
+               }
+       }
+       return ret - i;
+}
+
+/*
+ * Encoder mode_set hook: the timings are programmed by the CRTC
+ * helper, so all that is left here is to enable HDMI audio.
+ */
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       oaktrail_hdmi_audio_enable(encoder->dev);
+}
+
+/*
+ * Connector destroy hook -- intentionally empty.
+ * NOTE(review): no drm_sysfs_connector_remove()/drm_connector_cleanup()
+ * is done here, so the connector registration and its allocation appear
+ * to outlive the connector; confirm whether teardown handles this.
+ */
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+       return;
+}
+
+/* Encoder helper vtable: shared prepare/commit, HDMI-specific the rest */
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+       .dpms = oaktrail_hdmi_dpms,
+       .mode_fixup = oaktrail_hdmi_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = oaktrail_hdmi_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+/* Connector helper vtable: mode probing/validation for the HDMI port */
+static const struct drm_connector_helper_funcs
+                                       oaktrail_hdmi_connector_helper_funcs = {
+       .get_modes = oaktrail_hdmi_get_modes,
+       .mode_valid = oaktrail_hdmi_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+/* Connector vtable: generic DPMS/probing plus HDMI detect/destroy */
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = oaktrail_hdmi_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = oaktrail_hdmi_destroy,
+};
+
+/* Encoder destroy hook: unhook the encoder from the DRM core */
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+/* Encoder vtable */
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+       .destroy = oaktrail_hdmi_enc_destroy,
+};
+
+/*
+ * Create and register the HDMI connector/encoder pair for this device.
+ * Allocation failures are silent no-ops (the display simply has no
+ * HDMI output); only the encoder is freed if the connector allocation
+ * fails.
+ */
+void oaktrail_hdmi_init(struct drm_device *dev,
+                                       struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &oaktrail_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DVID);
+
+       drm_encoder_init(dev, encoder,
+                        &oaktrail_hdmi_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+
+       psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+       drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+       drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+       /* Fixed properties of the HDMI link */
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+       drm_sysfs_connector_add(connector);
+
+       return;
+
+failed_connector:
+       kfree(psb_intel_encoder);
+}
+
+/*
+ * PCI id of the Oaktrail HDMI controller function (8086:080d).
+ * NOTE(review): oaktrail_hdmi_setup() looks the device up directly via
+ * pci_get_device(); this table does not appear to be referenced in the
+ * visible code -- confirm it is needed (e.g. for a device table export).
+ */
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+       {}
+};
+
+/*
+ * Locate and bring up the companion HDMI PCI function: enable it, map
+ * its registers, initialize its i2c engine and mute audio.  On success
+ * the pci_get_device() reference is kept (dropped in teardown).
+ *
+ * Fix: the error paths previously leaked that reference -- they now
+ * call pci_dev_put() before returning.
+ */
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev;
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       int ret;
+
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+       if (!pdev)
+               return;
+
+       hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+       if (!hdmi_dev) {
+               dev_err(dev->dev, "failed to allocate memory\n");
+               goto out;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(dev->dev, "failed to enable hdmi controller\n");
+               goto free;
+       }
+
+       hdmi_dev->mmio = pci_resource_start(pdev, 0);
+       hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+       hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+       if (!hdmi_dev->regs) {
+               dev_err(dev->dev, "failed to map hdmi mmio\n");
+               goto free;
+       }
+
+       hdmi_dev->dev = pdev;
+       pci_set_drvdata(pdev, hdmi_dev);
+
+       /* Initialize i2c controller */
+       ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+       if (ret)
+               dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+       dev_priv->hdmi_priv = hdmi_dev;
+       oaktrail_hdmi_audio_disable(dev);
+       return;
+
+free:
+       kfree(hdmi_dev);
+out:
+       /* Drop the reference taken by pci_get_device() */
+       pci_dev_put(pdev);
+       return;
+}
+
+/*
+ * Undo oaktrail_hdmi_setup(): tear down the i2c engine, unmap the
+ * registers, free the state and drop the PCI device reference.
+ */
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       struct pci_dev *pdev;
+
+       if (!hdmi_dev)
+               return;
+
+       pdev = hdmi_dev->dev;
+       pci_set_drvdata(pdev, NULL);
+       oaktrail_hdmi_i2c_exit(pdev);
+       iounmap(hdmi_dev->regs);
+       kfree(hdmi_dev);
+       pci_dev_put(pdev);
+}
+
+/*
+ * Save HDMI-related register state (DPLL, pipe B timings on both the
+ * display and PCH side, plane B, cursor B and the pipe B palette) for
+ * restore after a power transition.  DPLL and PCH copies live in
+ * hdmi_dev; the generic pipe/plane state lives in dev_priv.
+ */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int i;
+
+       /* dpll */
+       hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+       hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+       hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+       hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+       hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+       /* pipe B */
+       dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+       dev_priv->savePIPEBSRC  = PSB_RVDC32(PIPEBSRC);
+       dev_priv->saveHTOTAL_B  = PSB_RVDC32(HTOTAL_B);
+       dev_priv->saveHBLANK_B  = PSB_RVDC32(HBLANK_B);
+       dev_priv->saveHSYNC_B   = PSB_RVDC32(HSYNC_B);
+       dev_priv->saveVTOTAL_B  = PSB_RVDC32(VTOTAL_B);
+       dev_priv->saveVBLANK_B  = PSB_RVDC32(VBLANK_B);
+       dev_priv->saveVSYNC_B   = PSB_RVDC32(VSYNC_B);
+
+       hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+       hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+       hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+       hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+       hdmi_dev->savePCH_HSYNC_B  = PSB_RVDC32(PCH_HSYNC_B);
+       hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+       hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+       hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
+
+       /* plane */
+       dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+       dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+       dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+       dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+       dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+       dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+
+       /* cursor B */
+       dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+       dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+       dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+       /* save palette */
+       for (i = 0; i < 256; i++)
+               dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/*
+ * Restore the register state captured by oaktrail_hdmi_save().
+ * Order matters: the DPLL comes up first (with a settle delay), then
+ * pipe timings, then the pipe configuration, and the plane/cursor
+ * registers last (DSPBSURF written after the rest of the plane state).
+ */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int i;
+
+       /* dpll */
+       PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+       PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+       PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+       PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+       PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+       DRM_UDELAY(150);
+
+       /* pipe */
+       PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+       PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+       PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+       PSB_WVDC32(dev_priv->saveHSYNC_B,  HSYNC_B);
+       PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+       PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+       PSB_WVDC32(dev_priv->saveVSYNC_B,  VSYNC_B);
+
+       PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+       PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+       PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+       PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B,  PCH_HSYNC_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
+
+       PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+       PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+       /* plane */
+       PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+       PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+       PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+       PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+       PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+
+       /* cursor B */
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+       /* restore palette */
+       for (i = 0; i < 256; i++)
+               PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644 (file)
index 0000000..7054408
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+/* MMIO accessors; expect a local 'hdmi_dev' with a mapped ->regs */
+#define HDMI_READ(reg)         readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)   writel(val, hdmi_dev->regs + (reg))
+
+/* HDMI control register and its enable/detect bits */
+#define HDMI_HCR       0x1000
+#define HCR_DETECT_HDP         (1 << 6)
+#define HCR_ENABLE_HDCP                (1 << 5)
+#define HCR_ENABLE_AUDIO       (1 << 2)
+#define HCR_ENABLE_PIXEL       (1 << 1)
+#define HCR_ENABLE_TMDS                (1 << 0)
+/* Interrupt control register and its mask bits */
+#define HDMI_HICR      0x1004
+#define HDMI_INTR_I2C_ERROR    (1 << 4)
+#define HDMI_INTR_I2C_FULL     (1 << 3)
+#define HDMI_INTR_I2C_DONE     (1 << 2)
+#define HDMI_INTR_HPD          (1 << 0)
+#define HDMI_HSR       0x1008
+#define HDMI_HISR      0x100C
+/* i2c read-data FIFO base and host control register with its bits */
+#define HDMI_HI2CRDB0  0x1200
+#define HDMI_HI2CHCR   0x1240
+#define HI2C_HDCP_WRITE                (0 << 2)
+#define HI2C_HDCP_RI_READ      (1 << 2)
+#define HI2C_HDCP_READ         (2 << 2)
+#define HI2C_EDID_READ         (3 << 2)
+#define HI2C_READ_CONTINUE     (1 << 1)
+#define HI2C_ENABLE_TRANSACTION        (1 << 0)
+
+#define HDMI_ICRH      0x1100
+#define HDMI_HI2CTDR0  0x1244
+#define HDMI_HI2CTDR1  0x1248
+
+/* Software transfer state, tracked in hdmi_i2c_dev::status */
+#define I2C_STAT_INIT          0
+#define I2C_READ_DONE          1
+#define I2C_TRANSACTION_DONE   2
+
+/* Per-controller state for the interrupt-driven i2c (DDC/EDID) engine */
+struct hdmi_i2c_dev {
+       struct i2c_adapter *adap;       /* the registered adapter (bus 3) */
+       struct mutex i2c_lock;          /* serializes master_xfer calls */
+       struct completion complete;     /* signalled from the irq handler */
+       int status;                     /* I2C_STAT_INIT/READ_DONE/TRANSACTION_DONE */
+       struct i2c_msg *msg;            /* message currently being transferred */
+       int buf_offset;                 /* bytes of msg->buf filled so far */
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       u32 temp;
+
+       temp = HDMI_READ(HDMI_HICR);
+       temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+       HDMI_WRITE(HDMI_HICR, temp);
+       HDMI_READ(HDMI_HICR);
+}
+
+/* Mask all interrupt sources; the read-back flushes the write. */
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       HDMI_WRITE(HDMI_HICR, 0x0);
+       HDMI_READ(HDMI_HICR);
+}
+
+/*
+ * Kick off an EDID read of pmsg->len bytes and wait for the irq
+ * handler to flag completion.  Returns 0 on success, -EIO if the
+ * controller never signals DONE within the timeout (or the wait is
+ * interrupted).
+ *
+ * Fix: the original loop ignored the timeout-wait's return value and
+ * would retry forever, hanging if the DONE interrupt never arrived.
+ */
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 temp;
+
+       i2c_dev->status = I2C_STAT_INIT;
+       i2c_dev->msg = pmsg;
+       i2c_dev->buf_offset = 0;
+       INIT_COMPLETION(i2c_dev->complete);
+
+       /* Enable I2C transaction */
+       temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+       HDMI_WRITE(HDMI_HI2CHCR, temp);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       while (i2c_dev->status != I2C_TRANSACTION_DONE) {
+               if (wait_for_completion_interruptible_timeout(
+                               &i2c_dev->complete, 10 * HZ) <= 0)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+/* Write direction is intentionally a no-op (not needed for EDID). */
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+       /*
+        * XXX: i2c write seems isn't useful for EDID probe, don't do anything
+        */
+       return 0;
+}
+
+/*
+ * i2c_algorithm.master_xfer implementation: transfer each message
+ * with the i2c unit and irqs enabled, serialized by i2c_lock.
+ *
+ * Fix: 'err' was computed but never used, so failures were silently
+ * reported as full success.  Per the master_xfer contract we now stop
+ * at the first failing message and return its negative errno,
+ * otherwise the number of messages processed.
+ */
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+                               struct i2c_msg *pmsg,
+                               int num)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       int i, err = 0;
+
+       mutex_lock(&i2c_dev->i2c_lock);
+
+       /* Enable i2c unit */
+       HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+       /* Enable irq */
+       hdmi_i2c_irq_enable(hdmi_dev);
+       for (i = 0; i < num; i++) {
+               if (pmsg->len && pmsg->buf) {
+                       if (pmsg->flags & I2C_M_RD)
+                               err = xfer_read(adap, pmsg);
+                       else
+                               err = xfer_write(adap, pmsg);
+                       if (err)
+                               break;  /* stop at the first failure */
+               }
+               pmsg++;         /* next message */
+       }
+
+       /* Disable irq */
+       hdmi_i2c_irq_disable(hdmi_dev);
+
+       mutex_unlock(&i2c_dev->i2c_lock);
+
+       return err ? err : i;
+}
+
+/* Advertise plain i2c transfers plus 10-bit addressing. */
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+/* Bus algorithm glue for the hardware i2c engine */
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+       .master_xfer    = oaktrail_hdmi_i2c_access,
+       .functionality  = oaktrail_hdmi_i2c_func,
+};
+
+/* Statically numbered adapter: bus 3, matching i2c_get_adapter(3)
+ * in oaktrail_hdmi_get_modes(). */
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+       .name           = "oaktrail_hdmi_i2c",
+       .nr             = 3,
+       .owner          = THIS_MODULE,
+       .class          = I2C_CLASS_DDC,
+       .algo           = &oaktrail_hdmi_i2c_algorithm,
+};
+
+/*
+ * Irq-context handler for the "read FIFO full" event: drain the
+ * 16-word (64-byte) read FIFO into the current message buffer, ack the
+ * interrupt and tell the controller to continue the read.
+ *
+ * Fix: the original always memcpy'd all 64 bytes, overrunning
+ * msg->buf for any request shorter than the FIFO.  All 16 FIFO
+ * registers are still read (to keep the hardware access pattern
+ * unchanged), but the copy is clamped to msg->len.
+ */
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       struct i2c_msg *msg = i2c_dev->msg;
+       u8 *buf = msg->buf;
+       u32 temp;
+       int i, offset;
+
+       offset = i2c_dev->buf_offset;
+       for (i = 0; i < 0x10; i++) {
+               temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+               if (offset < msg->len) {
+                       int count = msg->len - offset;
+
+                       if (count > 4)
+                               count = 4;
+                       memcpy(buf + offset, &temp, count);
+                       offset += count;
+               }
+       }
+       i2c_dev->buf_offset = offset;
+
+       /* clearing read buffer full intr */
+       temp = HDMI_READ(HDMI_HISR);
+       HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+       HDMI_READ(HDMI_HISR);
+
+       /* continue read transaction */
+       temp = HDMI_READ(HDMI_HI2CHCR);
+       HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       i2c_dev->status = I2C_READ_DONE;
+       return;
+}
+
+/*
+ * Irq-context handler for the "transaction done" event: clear the
+ * done interrupt, stop the hardware transaction and flag completion
+ * to the waiter in xfer_read().
+ */
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 hisr, hcr;
+
+       /* clear transaction done intr */
+       hisr = HDMI_READ(HDMI_HISR);
+       HDMI_WRITE(HDMI_HISR, hisr | HDMI_INTR_I2C_DONE);
+       HDMI_READ(HDMI_HISR);
+
+       /* stop the hardware transaction */
+       hcr = HDMI_READ(HDMI_HI2CHCR);
+       HDMI_WRITE(HDMI_HI2CHCR, hcr & ~HI2C_ENABLE_TRANSACTION);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       i2c_dev->status = I2C_TRANSACTION_DONE;
+}
+
+/*
+ * Shared interrupt handler: acks hot-plug, drains the read FIFO on
+ * "full", finishes the transfer on "done", and wakes the waiter.
+ * NOTE(review): this always returns IRQ_HANDLED and always completes,
+ * even when no status bit was set -- for a shared line, returning
+ * IRQ_NONE on spurious invocations would be cleaner; confirm intent.
+ */
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = dev;
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 stat;
+
+       stat = HDMI_READ(HDMI_HISR);
+
+       /* ack hot-plug detect */
+       if (stat & HDMI_INTR_HPD) {
+               HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+               HDMI_READ(HDMI_HISR);
+       }
+
+       if (stat & HDMI_INTR_I2C_FULL)
+               hdmi_i2c_read(hdmi_dev);
+
+       if (stat & HDMI_INTR_I2C_DONE)
+               hdmi_i2c_transaction_done(hdmi_dev);
+
+       complete(&i2c_dev->complete);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * choose alternate function 2 of GPIO pin 52, 53,
+ * which is used by HDMI I2C logic
+ *
+ * Fix: the ioremap cookie must be 'void __iomem *', not 'void *'
+ * (sparse address-space annotation); behavior is unchanged.
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+       void __iomem *base;
+       unsigned int gpio_base = 0xff12c000;
+       int gpio_len = 0x1000;
+       u32 temp;
+
+       base = ioremap((resource_size_t)gpio_base, gpio_len);
+       if (base == NULL) {
+               DRM_ERROR("gpio ioremap fail\n");
+               return;
+       }
+
+       /* Set the alternate-function bits for pins 52/53 */
+       temp = readl(base + 0x44);
+       DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+       writel((temp | 0x00000a00), (base +  0x44));
+       temp = readl(base + 0x44);
+       DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+       iounmap(base);
+}
+
+/*
+ * Bring up the HDMI hardware i2c engine: allocate its state, fix up
+ * the GPIO muxing, request the (shared) interrupt and register the
+ * numbered adapter.  Returns 0 or a negative errno.
+ *
+ * Fix: a failed i2c_add_numbered_adapter() previously leaked both the
+ * irq and i2c_dev and left hdmi_dev->i2c_dev dangling; all of that is
+ * now unwound on the error path.
+ */
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       struct hdmi_i2c_dev *i2c_dev;
+       int ret;
+
+       hdmi_dev = pci_get_drvdata(dev);
+
+       i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+       if (i2c_dev == NULL) {
+               DRM_ERROR("Can't allocate interface\n");
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+       i2c_dev->status = I2C_STAT_INIT;
+       init_completion(&i2c_dev->complete);
+       mutex_init(&i2c_dev->i2c_lock);
+       i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+       hdmi_dev->i2c_dev = i2c_dev;
+
+       /* Enable HDMI I2C function on gpio */
+       oaktrail_hdmi_i2c_gpio_fix();
+
+       /* request irq */
+       ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+                         oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+       if (ret) {
+               DRM_ERROR("Failed to request IRQ for I2C controller\n");
+               goto err_free;
+       }
+
+       /* Adapter registration */
+       ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+       if (ret) {
+               DRM_ERROR("Failed to add I2C adapter\n");
+               goto err_irq;
+       }
+       return 0;
+
+err_irq:
+       free_irq(dev->irq, hdmi_dev);
+err_free:
+       hdmi_dev->i2c_dev = NULL;       /* don't leave a dangling pointer */
+       kfree(i2c_dev);
+exit:
+       return ret;
+}
+
+/*
+ * Tear down the HDMI i2c engine: unregister the adapter, release the
+ * interrupt, then free the state.
+ *
+ * Fix: the original freed i2c_dev *before* free_irq(), leaving a
+ * window where a late shared interrupt could dereference the freed
+ * structure through hdmi_dev->i2c_dev.  The irq is now released first.
+ */
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       struct hdmi_i2c_dev *i2c_dev;
+
+       hdmi_dev = pci_get_drvdata(dev);
+       if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
+               DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+
+       /* No more interrupts can reference i2c_dev after this */
+       free_irq(dev->irq, hdmi_dev);
+
+       i2c_dev = hdmi_dev->i2c_dev;
+       hdmi_dev->i2c_dev = NULL;
+       kfree(i2c_dev);
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644 (file)
index 0000000..238bbe1
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/mrst.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/* The max/min PWM frequency is held in BPCR[31:17]. */
+/* The smallest value that fits in the 15-bit field is 1
+ * (not 0); the hardware then */
+/* shifts it left by one bit to obtain the actual 16-bit
+ * value the 15 bits correspond to. */
+#define MRST_BLC_MAX_PWM_REG_FREQ          0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * Set the panel power state, spinning on PP_STATUS until it lands.
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+                               struct psb_intel_encoder *psb_intel_encoder,
+                               bool on)
+{
+       u32 pp_status;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & (PP_ON | PP_READY)) == PP_READY); /* wait for PP_ON */
+               dev_priv->is_lvds_on = true;
+               if (dev_priv->ops->lvds_bl_power)
+                       dev_priv->ops->lvds_bl_power(dev, true);
+       } else {
+               /* backlight off first, then the panel itself */
+               if (dev_priv->ops->lvds_bl_power)
+                       dev_priv->ops->lvds_bl_power(dev, false);
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON); /* wait until fully off */
+               dev_priv->is_lvds_on = false;
+               pm_request_idle(&dev->pdev->dev); /* allow runtime suspend */
+       }
+       gma_power_end(dev);
+}
+
+/* DPMS hook: anything other than DPMS_ON powers the panel down. */
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+
+       if (mode == DRM_MODE_DPMS_ON)
+               oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+       else
+               oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+/*
+ * Program the LVDS port and panel fitter for the given mode.  Fixes two
+ * issues in the original: the connector search loop never broke out, so
+ * after iteration `connector` pointed at the bogus container of the list
+ * head and the NULL check could never fire; and the error path returned
+ * without dropping the power reference taken by gma_power_begin().
+ */
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector = NULL;
+       struct drm_crtc *crtc = encoder->crtc;
+       u32 lvds_port;
+       uint64_t v = DRM_MODE_SCALE_FULLSCREEN; /* default if property unset */
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+       lvds_port = (REG_READ(LVDS) &
+                   (~LVDS_PIPEB_SELECT)) |
+                   LVDS_PORT_EN |
+                   LVDS_BORDER_EN;
+
+       /* If the firmware says dither on Moorestown, or the BIOS does
+          on Oaktrail then enable dithering */
+       if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+               lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(LVDS, lvds_port);
+
+       /* Find the connector attached to this encoder's crtc */
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               if (connector->encoder && connector->encoder->crtc == crtc)
+                       break;
+       }
+       if (&connector->head == &mode_config->connector_list) {
+               DRM_ERROR("Couldn't find connector when setting mode");
+               gma_power_end(dev);     /* drop the reference taken above */
+               return;
+       }
+
+       drm_connector_property_get_value(
+               connector,
+               dev->mode_config.scaling_mode_property,
+               &v);
+
+       if (v == DRM_MODE_SCALE_NO_SCALE)
+               REG_WRITE(PFIT_CONTROL, 0);
+       else if (v == DRM_MODE_SCALE_ASPECT) {
+               if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+                   (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+                       if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+                           (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+                       else if ((adjusted_mode->crtc_hdisplay *
+                               mode->vdisplay) > (mode->hdisplay *
+                               adjusted_mode->crtc_vdisplay))
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+                                         PFIT_SCALING_MODE_PILLARBOX);
+                       else
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+                                         PFIT_SCALING_MODE_LETTERBOX);
+               } else
+                       REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+       } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+       gma_power_end(dev);
+}
+
+/* Pre-modeset hook: save PWM state so commit can restore the duty cycle,
+ * then power the panel down for the duration of the mode change. */
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+       oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+       gma_power_end(dev);
+}
+
+/* Return the maximum backlight duty cycle, reading the live register
+ * when the power island is up and the saved copy otherwise. */
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 ret;
+
+       if (gma_power_begin(dev, false)) {
+               ret = ((REG_READ(BLC_PWM_CTL) &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+               gma_power_end(dev);
+       } else
+               /* island off: fall back to the state saved at suspend */
+               ret = ((dev_priv->saveBLC_PWM_CTL &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+       return ret;
+}
+
+/* Post-modeset hook: restore a sane duty cycle and power the panel up. */
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (mode_dev->backlight_duty_cycle == 0) /* nothing saved by prepare() */
+               mode_dev->backlight_duty_cycle =
+                                       oaktrail_lvds_get_max_backlight(dev);
+       oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+}
+
+/* Encoder helpers; mode_fixup is reused from the psb_intel LVDS code. */
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+       .dpms = oaktrail_lvds_dpms,
+       .mode_fixup = psb_intel_lvds_mode_fixup,
+       .prepare = oaktrail_lvds_prepare,
+       .mode_set = oaktrail_lvds_mode_set,
+       .commit = oaktrail_lvds_commit,
+};
+
+static struct drm_display_mode lvds_configuration_modes[] = {
+       /* hard coded fixed mode for TPO LTPS LPJ040K001A */
+       { DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+                  846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+       /* hard coded fixed mode for LVDS 800x480 */
+       { DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+                  802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+       /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75;
+        * index 2: last-resort fallback in oaktrail_lvds_get_configuration_mode */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+                  1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+       /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+                  1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+       /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+                  1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+       /* hard coded fixed mode for LVDS 1024x768 */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+       /* hard coded fixed mode for LVDS 1366x768 */
+       { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+                  1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+};
+
+/* Pick the panel fixed mode: firmware GCT data first, then BIOS VBT,
+ * then LVDS VBT, finally a hard-coded table entry as a last resort. */
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+                                       struct psb_intel_mode_device *mode_dev)
+{
+       struct drm_display_mode *mode = NULL;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+       mode_dev->panel_fixed_mode = NULL;
+
+       /* Use the firmware provided data on Moorestown */
+       if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+               mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+               if (!mode)
+                       return;
+
+               mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+               mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+               mode->hsync_start = mode->hdisplay + \
+                               ((ti->hsync_offset_hi << 8) | \
+                               ti->hsync_offset_lo);
+               mode->hsync_end = mode->hsync_start + \
+                               ((ti->hsync_pulse_width_hi << 8) | \
+                               ti->hsync_pulse_width_lo);
+               mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+                                                       ti->hblank_lo);
+               /* NOTE(review): vsync fields shift by 4 while hsync shifts
+                * by 8 — looks like 4-bit "hi" nibbles in the GCT DTD; confirm */
+               mode->vsync_start = \
+                       mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
+                                               ti->vsync_offset_lo);
+               mode->vsync_end = \
+                       mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
+                                               ti->vsync_pulse_width_lo);
+               mode->vtotal = mode->vdisplay + \
+                               ((ti->vblank_hi << 8) | ti->vblank_lo);
+               mode->clock = ti->pixel_clock * 10; /* GCT stores clock in 10kHz units? confirm */
+#if 0
+               printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+               printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+               printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+               printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+               printk(KERN_INFO "htotal is %d\n", mode->htotal);
+               printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+               printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+               printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+               printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+               mode_dev->panel_fixed_mode = mode;
+       }
+
+       /* Use the BIOS VBT mode if available */
+       if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+               mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+                                               mode_dev->vbt_mode);
+
+       /* Then try the LVDS VBT mode */
+       if (mode_dev->panel_fixed_mode == NULL)
+               if (dev_priv->lfp_lvds_vbt_mode)
+                       mode_dev->panel_fixed_mode =
+                               drm_mode_duplicate(dev,
+                                       dev_priv->lfp_lvds_vbt_mode);
+       /* Then guess: default to the Samsung 1024x600 table entry */
+       if (mode_dev->panel_fixed_mode == NULL)
+               mode_dev->panel_fixed_mode
+                       = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+       drm_mode_set_name(mode_dev->panel_fixed_mode);
+       drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct edid *edid;
+       int ret = 0;
+       struct i2c_adapter *i2c_adap;
+       struct drm_display_mode *scan;  /* *modes, *bios_mode; */
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       dev_priv->is_lvds_on = true;
+       drm_connector_init(dev, connector,
+                          &psb_intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &psb_intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_attach_property(connector,
+                                       dev->mode_config.scaling_mode_property,
+                                       DRM_MODE_SCALE_FULLSCREEN);
+       drm_connector_attach_property(connector,
+                                       dev_priv->backlight_property,
+                                       BRIGHTNESS_MAX_LEVEL);
+
+       mode_dev->panel_wants_dither = false;
+       if (dev_priv->vbt_data.size != 0x00) /* firmware GCT present */
+               mode_dev->panel_wants_dither = (dev_priv->gct_data.
+                       Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+        if (dev_priv->lvds_dither)
+                mode_dev->panel_wants_dither = 1;
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+       if (i2c_adap == NULL)
+               dev_err(dev->dev, "No ddc adapter available!\n");
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       if (i2c_adap) {
+               edid = drm_get_edid(connector, i2c_adap);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                                       edid);
+                       /* NOTE(review): ret is never checked after this */
+                       ret = drm_add_edid_modes(connector, edid);
+                       kfree(edid);
+               }
+
+               list_for_each_entry(scan, &connector->probed_modes, head) {
+                       if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                               mode_dev->panel_fixed_mode =
+                                   drm_mode_duplicate(dev, scan);
+                               goto out;       /* FIXME: check for quirks */
+                       }
+               }
+       }
+       /*
+        * If we didn't get EDID, try getting panel timing
+        * from configuration data
+        */
+       oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+       if (mode_dev->panel_fixed_mode) {
+               mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+               goto out;       /* FIXME: check for quirks */
+       }
+
+       /* If we still don't have a mode after all that, give up.
+        * NOTE(review): this condition is always true here — the non-NULL
+        * case already jumped to out above; the test is redundant. */
+       if (!mode_dev->panel_fixed_mode) {
+               dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+               goto failed_find;
+       }
+
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_find:
+       dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+       if (psb_intel_encoder->ddc_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+
+/* failed_ddc: */
+
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+       kfree(psb_intel_connector);
+failed_connector:
+       kfree(psb_intel_encoder);
+}
+
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644 (file)
index 0000000..9402569
--- /dev/null
@@ -0,0 +1,316 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex;       /* Serialize power ops */
+static spinlock_t power_ctrl_lock;     /* Serialize power claim */
+
+/**
+ *     gma_power_init          -       initialise power manager
+ *     @dev: our device
+ *
+ *     Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /* FIXME: Move APM/OSPM base into relevant device code */
+       dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+       dev_priv->ospm_base &= 0xffff;
+
+       dev_priv->display_power = true; /* We start active */
+       dev_priv->display_count = 0;    /* Currently no users */
+       dev_priv->suspended = false;    /* And not suspended */
+       spin_lock_init(&power_ctrl_lock);
+       mutex_init(&power_mutex);
+
+       dev_priv->ops->init_pm(dev);    /* chip-specific PM setup */
+}
+
+/**
+ *     gma_power_uninit        -       end power manager
+ *     @dev: device to end for
+ *
+ *     Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+       pm_runtime_disable(&dev->pdev->dev);    /* stop runtime PM callbacks */
+       pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ *     gma_suspend_display     -       suspend the display logic
+ *     @dev: our DRM device
+ *
+ *     Suspend the display logic of the graphics interface: save register
+ *     state, power the island down and mark the display as off.
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->suspended)        /* already done */
+               return;
+       dev_priv->ops->save_regs(dev);
+       dev_priv->ops->power_down(dev);
+       dev_priv->display_power = false;
+}
+
+/**
+ *     gma_resume_display      -       resume display side logic
+ *     @pdev: PCI device whose DRM device is being resumed
+ *
+ *     Resume the display hardware restoring state and enabling
+ *     as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->suspended == false)       /* nothing to do */
+               return;
+
+       /* turn on the display power island */
+       dev_priv->ops->power_up(dev);
+       dev_priv->suspended = false;
+       dev_priv->display_power = true;
+
+       PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       pci_write_config_word(pdev, PSB_GMCH_CTRL,
+                       dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+       dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ *     gma_suspend_pci         -       suspend PCI side
+ *     @pdev: PCI device
+ *
+ *     Perform the suspend processing on our PCI device state: save the
+ *     BSM/VBT pointers and MSI config, then put the device into D3hot.
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 bsm, vbt;   /* pci_read_config_dword() takes a u32 *, not int * */
+
+       if (dev_priv->suspended)
+               return;
+
+       pci_save_state(pdev);
+       pci_read_config_dword(pdev, 0x5C, &bsm);
+       dev_priv->saveBSM = bsm;
+       pci_read_config_dword(pdev, 0xFC, &vbt);
+       dev_priv->saveVBT = vbt;
+       pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+       pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       dev_priv->suspended = true;
+}
+
+/**
+ *     gma_resume_pci          -       resume helper
+ *     @pdev: our PCI device
+ *
+ *     Perform the resume processing on our PCI device state - rewrite
+ *     register state and re-enable the PCI device.  Returns true when the
+ *     device is up (no longer suspended), false otherwise.
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (!dev_priv->suspended)       /* already awake */
+               return true;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+       pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+       /* restoring MSI address and data in PCIx space */
+       pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+       pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+       ret = pci_enable_device(pdev);
+
+       if (ret != 0)
+               dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+       else
+               dev_priv->suspended = false;
+       return !dev_priv->suspended;
+}
+
+/**
+ *     gma_power_suspend               -       bus callback for suspend
+ *     @_dev: generic device (embedded in our PCI device)
+ *     (dev_pm_ops callback, so no pm_message_t argument)
+ *
+ *     Called back by the PCI layer during a suspend of the system. We
+ *     perform the necessary shut down steps and save enough state that
+ *     we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+       struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&power_mutex);
+       if (!dev_priv->suspended) {
+               if (dev_priv->display_count) {
+                       /* outstanding gma_power_begin() users: refuse */
+                       mutex_unlock(&power_mutex);
+                       return -EBUSY;
+               }
+               psb_irq_uninstall(dev);
+               gma_suspend_display(dev);
+               gma_suspend_pci(pdev);
+       }
+       mutex_unlock(&power_mutex);
+       return 0;
+}
+
+/**
+ *     gma_power_resume                -       resume power
+ *     @_dev: generic device (embedded in our PCI device)
+ *
+ *     Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+       struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       mutex_lock(&power_mutex);
+       gma_resume_pci(pdev);
+       gma_resume_display(pdev);
+       psb_irq_preinstall(dev);
+       psb_irq_postinstall(dev);
+       mutex_unlock(&power_mutex);
+       return 0;
+}
+
+/**
+ *     gma_power_is_on         -       return true if power is on
+ *     @dev: our DRM device
+ *
+ *     Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return dev_priv->display_power; /* instantaneous, unlocked snapshot */
+}
+
+/**
+ *     gma_power_begin         -       begin requiring power
+ *     @dev: our DRM device
+ *     @force_on: true to force power on
+ *
+ *     Begin an action that requires the display power island is enabled.
+ *     We refcount the islands.  Returns true when the island is (now) up
+ *     and a reference has been taken.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       bool ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&power_ctrl_lock, flags);
+       /* Power already on ? */
+       if (dev_priv->display_power) {
+               dev_priv->display_count++;
+               pm_runtime_get(&dev->pdev->dev);
+               spin_unlock_irqrestore(&power_ctrl_lock, flags);
+               return true;
+       }
+       if (force_on == false)
+               goto out_false;
+
+       /* Ok power up needed; gma_resume_pci() returns true on success,
+          so take the reference on true, not on 0 as before (inverted) */
+       ret = gma_resume_pci(dev->pdev);
+       if (ret) {
+               psb_irq_preinstall(dev);
+               psb_irq_postinstall(dev);
+               pm_runtime_get(&dev->pdev->dev);
+               dev_priv->display_count++;
+               spin_unlock_irqrestore(&power_ctrl_lock, flags);
+               return true;
+       }
+out_false:
+       spin_unlock_irqrestore(&power_ctrl_lock, flags);
+       return false;
+}
+
+/**
+ *     gma_power_end           -       end use of power
+ *     @dev: Our DRM device
+ *
+ *     Indicate that one of our gma_power_begin() requested periods when
+ *     the display island power is needed has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+       spin_lock_irqsave(&power_ctrl_lock, flags);
+       dev_priv->display_count--;
+       WARN_ON(dev_priv->display_count < 0);   /* unbalanced begin/end */
+       spin_unlock_irqrestore(&power_ctrl_lock, flags);
+       pm_runtime_put(&dev->pdev->dev);
+}
+
+/* Runtime-PM suspend callback: defer to the common suspend path. */
+int psb_runtime_suspend(struct device *dev)
+{
+       return gma_power_suspend(dev);
+}
+
+/* Runtime-PM resume callback: defer to the common resume path. */
+int psb_runtime_resume(struct device *dev)
+{
+       return gma_power_resume(dev);   /* was ";;" — stray semicolon removed */
+}
+
+/* Runtime-PM idle callback: 0 = stay awake (island in use), 1 = may idle. */
+int psb_runtime_idle(struct device *dev)
+{
+       struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+       struct drm_psb_private *dev_priv = drmdev->dev_private;
+       if (dev_priv->display_count)    /* outstanding gma_power_begin() refs */
+               return 0;
+       else
+               return 1;
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644 (file)
index 0000000..1969d2e
--- /dev/null
@@ -0,0 +1,67 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management  will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
+
+/*
+ * Use this function to do an instantaneous check for if the hw is on.
+ * Only use this in cases where you know the mutex is already held such
+ * as in irq install/uninstall and you need to
+ * prevent a deadlock situation.  Otherwise use gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644 (file)
index 0000000..e5f5906
--- /dev/null
@@ -0,0 +1,328 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+/* Hook up the Poulsbo display outputs: the LVDS panel and SDVO port B. */
+static int psb_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+       psb_intel_sdvo_init(dev, SDVOB);
+       return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *     Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+/* Backlight class .get_brightness: report the locally cached level. */
+static int psb_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return psb_brightness;
+}
+
+
+/*
+ * Program the LVDS backlight PWM frequency register (BLC_PWM_CTL) from
+ * the stored backlight info and the graphics core clock.  Returns
+ * -ENOENT when no LVDS backlight data is available and -ERANGE when the
+ * computed PWM value falls outside the register limits.
+ */
+static int psb_backlight_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       /* u32 bl_max_freq; */
+       /* unsigned long value; */
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       /* get bl_max_freq and pol from dev_priv*/
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+               return -ENOENT;
+       }
+       bl_max_freq = dev_priv->lvds_bl->freq;
+       blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
+       /* PWM divider = core clock scaled by the precision factor, then
+          divided by the maximum backlight frequency */
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       /* NOTE(review): if dev_priv->core_freq is 0 (see psb_get_core_freq)
+          the computed value is 0 and this path returns -ERANGE — TODO
+          confirm that is the intended failure mode */
+       if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+                value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+                               return -ERANGE;
+       else {
+               value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+               REG_WRITE(BLC_PWM_CTL,
+                       (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+       }
+       return 0;
+}
+
+/*
+ * Backlight class .update_status: push the requested brightness to the
+ * LVDS panel and remember it in the psb_brightness cache.
+ */
+static int psb_set_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(psb_backlight_device);
+       int level = bd->props.brightness;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+
+       psb_intel_lvds_set_brightness(dev, level);
+       psb_brightness = level;
+       return 0;
+}
+
+/* Backlight class callbacks for the psb-bl device */
+static const struct backlight_ops psb_ops = {
+       .get_brightness = psb_get_brightness,
+       .update_status  = psb_set_brightness,
+};
+
+/*
+ * Register the "psb-bl" backlight device (0-100 scale), program the PWM
+ * via psb_backlight_setup() and start at full brightness.  On setup
+ * failure the device is unregistered again and the error returned.
+ */
+static int psb_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       psb_backlight_device = backlight_device_register("psb-bl",
+                                       NULL, (void *)dev, &psb_ops, &props);
+       if (IS_ERR(psb_backlight_device))
+               return PTR_ERR(psb_backlight_device);
+
+       ret = psb_backlight_setup(dev);
+       if (ret < 0) {
+               backlight_device_unregister(psb_backlight_device);
+               psb_backlight_device = NULL;
+               return ret;
+       }
+       psb_backlight_device->props.brightness = 100;
+       psb_backlight_device->props.max_brightness = 100;
+       backlight_update_status(psb_backlight_device);
+       dev_priv->backlight_device = psb_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Poulsbo specific chip logic and low level methods
+ *     for power management
+ */
+
+/*
+ * One-time power management setup: adjust the SGX clock gating control
+ * register.  The trailing read flushes the posted write.
+ */
+static void psb_init_pm(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+       gating &= ~3;   /* Disable 2D clock gating */
+       gating |= 1;
+       PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+       PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ *     psb_save_display_registers      -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend
+ */
+/**
+ *     psb_save_display_registers      -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+
+       /* Display arbitration control + watermarks */
+       dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+       dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+       dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+       dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+       dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+       dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+       dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+       dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+       /* Save crtc and output state under the mode config lock; only
+          crtcs that are actually in use are saved */
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (drm_helper_crtc_in_use(crtc))
+                       crtc->funcs->save(crtc);
+       }
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               connector->funcs->save(connector);
+
+       mutex_unlock(&dev->mode_config.mutex);
+       return 0;
+}
+
+/**
+ *     psb_restore_display_registers   -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ */
+/**
+ *     psb_restore_display_registers   -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+
+       /* Display arbitration + watermarks */
+       PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+       PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+       PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+       PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+       PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+       PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+       PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+       PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+       /*make sure VGA plane is off. it initializes to on after reset!*/
+       PSB_WVDC32(0x80000000, VGACNTRL);
+
+       /* Mirror of the save path: restore crtc/connector state under
+          the mode config lock */
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               if (drm_helper_crtc_in_use(crtc))
+                       crtc->funcs->restore(crtc);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               connector->funcs->restore(connector);
+
+       mutex_unlock(&dev->mode_config.mutex);
+       return 0;
+}
+
+/* Chip-specific power-down hook; currently a no-op on Poulsbo */
+static int psb_power_down(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* Chip-specific power-up hook; currently a no-op on Poulsbo */
+static int psb_power_up(struct drm_device *dev)
+{
+       return 0;
+}
+
+/*
+ * Read the graphics core clock frequency (in MHz) from the host bridge
+ * config space and cache it in dev_priv->core_freq.  Unknown encodings
+ * leave the frequency at 0, which psb_backlight_setup() later rejects.
+ */
+static void psb_get_core_freq(struct drm_device *dev)
+{
+       uint32_t clock;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+       /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+       pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+       pci_read_config_dword(pci_root, 0xD4, &clock);
+       pci_dev_put(pci_root);
+
+       switch (clock & 0x07) {
+       case 0:
+               dev_priv->core_freq = 100;
+               break;
+       case 1:
+               dev_priv->core_freq = 133;
+               break;
+       case 2:
+               dev_priv->core_freq = 150;
+               break;
+       case 3:
+               dev_priv->core_freq = 178;
+               break;
+       case 4:
+               dev_priv->core_freq = 200;
+               break;
+       case 5:
+       case 6:
+       case 7:
+               dev_priv->core_freq = 266;
+               /* Bug fix: the missing break here let cases 5-7 fall
+                  through into default and clobber core_freq with 0 */
+               break;
+       default:
+               dev_priv->core_freq = 0;
+       }
+}
+
+/* Poulsbo chip bring-up: core clock query, GMBUS, opregion and VBT */
+static int psb_chip_setup(struct drm_device *dev)
+{
+       psb_get_core_freq(dev);
+       gma_intel_setup_gmbus(dev);
+       gma_intel_opregion_init(dev);
+       psb_intel_init_bios(dev);
+       return 0;
+}
+
+/* Undo psb_chip_setup(); only the GMBUS needs explicit teardown here */
+static void psb_chip_teardown(struct drm_device *dev)
+{
+       gma_intel_teardown_gmbus(dev);
+}
+
+/* Per-chip operations table for Poulsbo, referenced from pciidlist */
+const struct psb_ops psb_chip_ops = {
+       .name = "Poulsbo",
+       .accel_2d = 1,
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = PSB_SGX_OFFSET,
+       .chip_setup = psb_chip_setup,
+       .chip_teardown = psb_chip_teardown,
+
+       .crtc_helper = &psb_intel_helper_funcs,
+       .crtc_funcs = &psb_intel_crtc_funcs,
+
+       .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = psb_backlight_init,
+#endif
+
+       .init_pm = psb_init_pm,
+       .save_regs = psb_save_display_registers,
+       .restore_regs = psb_restore_display_registers,
+       .power_down = psb_power_down,
+       .power_up = psb_power_up,
+};
+
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644 (file)
index 0000000..f14768f
--- /dev/null
@@ -0,0 +1,703 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+
+
+/* PCI IDs we bind to; driver_data carries the per-chip psb_ops pointer */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+       { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+       { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+       { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       /* Atom E620 */
+       { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+       { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+       { 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_ADB      \
+               DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION   \
+               DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+                        struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY    \
+               DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+                        struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_GAMMA    \
+               DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+                        struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL  \
+               DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+                        uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID    \
+               DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+                        struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE       \
+               DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+                        struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+               DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+                        struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+       [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+/* Driver-private ioctl table; most entries require DRM_AUTH */
+static struct drm_ioctl_desc psb_ioctls[] = {
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+                     DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+                     DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+                                       psb_intel_get_pipe_from_crtc_id, 0),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+                                               DRM_UNLOCKED | DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+                                               DRM_UNLOCKED | DRM_AUTH),
+};
+
+/* DRM .lastclose hook: nothing to clean up when the last client exits */
+static void psb_lastclose(struct drm_device *dev)
+{
+       return;
+}
+
+/* Counterpart of psb_do_init(); currently there is nothing to undo */
+static void psb_do_takedown(struct drm_device *dev)
+{
+}
+
+/*
+ * Per-device SGX/GTT bring-up: validates GATT alignment, reserves the
+ * stolen-memory portion of the GTT, initializes the irq/2D locks and
+ * programs the SGX BIF registers.
+ */
+static int psb_do_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_gtt *pg = &dev_priv->gtt;
+
+       uint32_t stolen_gtt;
+
+       int ret = -ENOMEM;
+
+       if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+               dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+
+       /* Pages of GTT entries covering the stolen area, capped at the
+          total number of GTT pages */
+       stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+       stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       stolen_gtt =
+           (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+       dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+           (stolen_gtt << PAGE_SHIFT) * 1024;
+
+       /* NOTE(review): "1 ||" forces this debug dump on every load,
+          regardless of drm_debug — TODO confirm this is intentional */
+       if (1 || drm_debug) {
+               uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+               uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+               DRM_INFO("SGX core id = 0x%08x\n", core_id);
+               DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+                        (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+                        _PSB_CC_REVISION_MAJOR_SHIFT,
+                        (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+                        _PSB_CC_REVISION_MINOR_SHIFT);
+               DRM_INFO
+                   ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+                    (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+                    _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+                    (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+                    _PSB_CC_REVISION_DESIGNER_SHIFT);
+       }
+
+
+       spin_lock_init(&dev_priv->irqmask_lock);
+       spin_lock_init(&dev_priv->lock_2d);
+
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+       PSB_RSGX32(PSB_CR_BIF_BANK1);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+                                                       PSB_CR_BIF_CTRL);
+       psb_spank(dev_priv);
+
+       /* mmu_gatt ?? */
+       PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+       return 0;
+out_err:
+       psb_do_takedown(dev);
+       return ret;
+}
+
+/*
+ * Tear down everything psb_driver_load() set up, in reverse order:
+ * backlight, modeset, chip-specific state, MMU/page directories, GTT,
+ * scratch page and register mappings.  Also used as the error path of
+ * psb_driver_load(), so every step checks whether its resource exists.
+ */
+static int psb_driver_unload(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /* Kill vblank etc here */
+
+       gma_backlight_exit(dev);
+
+       psb_modeset_cleanup(dev);
+
+       if (dev_priv) {
+               psb_lid_timer_takedown(dev_priv);
+               gma_intel_opregion_exit(dev);
+
+               if (dev_priv->ops->chip_teardown)
+                       dev_priv->ops->chip_teardown(dev);
+               psb_do_takedown(dev);
+
+
+               if (dev_priv->pf_pd) {
+                       psb_mmu_free_pagedir(dev_priv->pf_pd);
+                       dev_priv->pf_pd = NULL;
+               }
+               if (dev_priv->mmu) {
+                       struct psb_gtt *pg = &dev_priv->gtt;
+
+                       /* Drop the stolen-memory mappings before taking
+                          the MMU driver down */
+                       down_read(&pg->sem);
+                       psb_mmu_remove_pfn_sequence(
+                               psb_mmu_get_default_pd
+                               (dev_priv->mmu),
+                               pg->mmu_gatt_start,
+                               dev_priv->vram_stolen_size >> PAGE_SHIFT);
+                       up_read(&pg->sem);
+                       psb_mmu_driver_takedown(dev_priv->mmu);
+                       dev_priv->mmu = NULL;
+               }
+               psb_gtt_takedown(dev);
+               if (dev_priv->scratch_page) {
+                       __free_page(dev_priv->scratch_page);
+                       dev_priv->scratch_page = NULL;
+               }
+               if (dev_priv->vdc_reg) {
+                       iounmap(dev_priv->vdc_reg);
+                       dev_priv->vdc_reg = NULL;
+               }
+               if (dev_priv->sgx_reg) {
+                       iounmap(dev_priv->sgx_reg);
+                       dev_priv->sgx_reg = NULL;
+               }
+
+               kfree(dev_priv);
+               dev->dev_private = NULL;
+
+               /*destroy VBT data*/
+               psb_intel_destroy_bios(dev);
+       }
+
+       gma_power_uninit(dev);
+
+       return 0;
+}
+
+
+/*
+ * DRM .load callback: allocate the private structure, map registers,
+ * run the chip-specific setup (chipset carries the psb_ops pointer from
+ * the PCI id table), bring up power management, GTT, MMU, interrupts,
+ * modesetting and — when an LVDS/MIPI output exists — the backlight.
+ * Most failures unwind through psb_driver_unload() via out_err.
+ */
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+       struct drm_psb_private *dev_priv;
+       unsigned long resource_start;
+       struct psb_gtt *pg;
+       unsigned long irqflags;
+       int ret = -ENOMEM;
+       uint32_t tt_pages;
+       struct drm_connector *connector;
+       struct psb_intel_encoder *psb_intel_encoder;
+
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+       if (dev_priv == NULL)
+               return -ENOMEM;
+
+       dev_priv->ops = (struct psb_ops *)chipset;
+       dev_priv->dev = dev;
+       dev->dev_private = (void *) dev_priv;
+
+       if (!IS_PSB(dev)) {
+               if (pci_enable_msi(dev->pdev))
+                       dev_warn(dev->dev, "Enabling MSI failed!\n");
+       }
+
+       dev_priv->num_pipe = dev_priv->ops->pipes;
+
+       resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+       dev_priv->vdc_reg =
+           ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+       if (!dev_priv->vdc_reg)
+               goto out_err;
+
+       dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+                                                       PSB_SGX_SIZE);
+       if (!dev_priv->sgx_reg)
+               goto out_err;
+
+       ret = dev_priv->ops->chip_setup(dev);
+       if (ret)
+               goto out_err;
+
+       /* Init OSPM support */
+       gma_power_init(dev);
+
+       ret = -ENOMEM;
+
+       dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+       if (!dev_priv->scratch_page)
+               goto out_err;
+
+       set_pages_uc(dev_priv->scratch_page, 1);
+
+       ret = psb_gtt_init(dev, 0);
+       if (ret)
+               goto out_err;
+
+       dev_priv->mmu = psb_mmu_driver_init((void *)0,
+                                       drm_psb_trap_pagefaults, 0,
+                                       dev_priv);
+       if (!dev_priv->mmu)
+               goto out_err;
+
+       pg = &dev_priv->gtt;
+
+       tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+               (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+
+       dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+       if (!dev_priv->pf_pd)
+               goto out_err;
+
+       psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+       psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+       /* NOTE(review): this failure path returns directly instead of
+          "goto out_err" like the others, skipping psb_driver_unload()
+          cleanup — TODO confirm whether resources leak here */
+       ret = psb_do_init(dev);
+       if (ret)
+               return ret;
+
+       PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+       PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+/*     igd_opregion_init(&dev_priv->opregion_dev); */
+       acpi_video_register();
+       if (dev_priv->lid_state)
+               psb_lid_timer_init(dev_priv);
+
+       ret = drm_vblank_init(dev, dev_priv->num_pipe);
+       if (ret)
+               goto out_err;
+
+       /*
+        * Install interrupt handlers prior to powering off SGX or else we will
+        * crash.
+        */
+       dev_priv->vdc_irq_mask = 0;
+       dev_priv->pipestat[0] = 0;
+       dev_priv->pipestat[1] = 0;
+       dev_priv->pipestat[2] = 0;
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+       PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+       PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+       if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_install(dev);
+
+       dev->vblank_disable_allowed = 1;
+
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+       dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+       psb_modeset_init(dev);
+       psb_fbdev_init(dev);
+       drm_kms_helper_poll_init(dev);
+
+       /* Only add backlight support if we have LVDS output */
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+               case INTEL_OUTPUT_MIPI:
+                       ret = gma_backlight_init(dev);
+                       break;
+               }
+       }
+
+       /* NOTE(review): direct return on backlight failure also bypasses
+          the out_err unwind — TODO confirm */
+       if (ret)
+               return ret;
+#if 0
+       /*enable runtime pm at last*/
+       pm_runtime_enable(&dev->pdev->dev);
+       pm_runtime_set_active(&dev->pdev->dev);
+#endif
+       /*Intel drm driver load is done, continue doing pvr load*/
+       return 0;
+out_err:
+       psb_driver_unload(dev);
+       return ret;
+}
+
+/* DRM .device_is_agp hook: GMA500 is never an AGP device */
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* Re-read the backlight level and push it to the hardware, if a
+   backlight device is registered (compiled out otherwise) */
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       if (bd) {
+               bd->props.brightness = bd->ops->get_brightness(bd);
+               backlight_update_status(bd);
+       }
+#endif
+}
+
+/* Ioctl: store the DPST brightness adjustment and refresh the backlight */
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       uint32_t *arg = data;
+
+       dev_priv->blc_adj2 = *arg;
+       get_brightness(dev_priv->backlight_device);
+       return 0;
+}
+
+/* Ioctl: store the ADB brightness adjustment and refresh the backlight */
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       uint32_t *arg = data;
+
+       dev_priv->blc_adj1 = *arg;
+       get_brightness(dev_priv->backlight_device);
+       return 0;
+}
+
+/*
+ * Ioctl: copy a 256-entry gamma LUT for the crtc driving the given
+ * connector and load it into hardware.
+ */
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+       struct drm_psb_dpst_lut_arg *lut_arg = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       struct psb_intel_crtc *psb_intel_crtc;
+       int i = 0;
+       int32_t obj_id;
+
+       obj_id = lut_arg->output_id;
+       obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               dev_dbg(dev->dev, "Invalid Connector object.\n");
+               return -EINVAL;
+       }
+
+       connector = obj_to_connector(obj);
+       /* NOTE(review): connector->encoder is dereferenced without a
+          NULL check; a connector with no active encoder would oops
+          here — TODO confirm callers guarantee an encoder */
+       crtc = connector->encoder->crtc;
+       psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+       for (i = 0; i < 256; i++)
+               psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+       psb_intel_crtc_load_lut(crtc);
+
+       return 0;
+}
+
+/*
+ * Ioctl: mode operations.  Only PSB_MODE_OPERATION_MODE_VALID is
+ * implemented — it converts the user-supplied mode and asks the
+ * connector's mode_valid helper whether it is usable, returning the
+ * verdict in arg->data.
+ */
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       uint32_t obj_id;
+       uint16_t op;
+       struct drm_mode_modeinfo *umode;
+       struct drm_display_mode *mode = NULL;
+       struct drm_psb_mode_operation_arg *arg;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_connector_helper_funcs *connector_funcs;
+       int ret = 0;
+       int resp = MODE_OK;
+
+       arg = (struct drm_psb_mode_operation_arg *)data;
+       obj_id = arg->obj_id;
+       op = arg->operation;
+
+       switch (op) {
+       case PSB_MODE_OPERATION_MODE_VALID:
+               umode = &arg->mode;
+
+               mutex_lock(&dev->mode_config.mutex);
+
+               obj = drm_mode_object_find(dev, obj_id,
+                                       DRM_MODE_OBJECT_CONNECTOR);
+               if (!obj) {
+                       ret = -EINVAL;
+                       goto mode_op_out;
+               }
+
+               connector = obj_to_connector(obj);
+
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto mode_op_out;
+               }
+
+               /* drm_crtc_convert_umode(mode, umode); */
+               {
+                       mode->clock = umode->clock;
+                       mode->hdisplay = umode->hdisplay;
+                       mode->hsync_start = umode->hsync_start;
+                       mode->hsync_end = umode->hsync_end;
+                       mode->htotal = umode->htotal;
+                       mode->hskew = umode->hskew;
+                       mode->vdisplay = umode->vdisplay;
+                       mode->vsync_start = umode->vsync_start;
+                       mode->vsync_end = umode->vsync_end;
+                       mode->vtotal = umode->vtotal;
+                       mode->vscan = umode->vscan;
+                       mode->vrefresh = umode->vrefresh;
+                       mode->flags = umode->flags;
+                       mode->type = umode->type;
+                       strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+                       mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+               }
+
+               connector_funcs = (struct drm_connector_helper_funcs *)
+                                  connector->helper_private;
+
+               if (connector_funcs->mode_valid) {
+                       resp = connector_funcs->mode_valid(connector, mode);
+                       arg->data = resp;
+               }
+
+               /*do some clean up work*/
+               /* NOTE(review): mode is always non-NULL here (checked
+                  above), so the guard is redundant */
+               if (mode)
+                       drm_mode_destroy(dev, mode);
+mode_op_out:
+               mutex_unlock(&dev->mode_config.mutex);
+               return ret;
+
+       default:
+               dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* unreachable: both switch arms return above */
+       return 0;
+}
+
+/* Ioctl: report the base and size of the stolen-memory region */
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       struct drm_psb_stolen_memory_arg *arg = data;
+
+       arg->base = dev_priv->stolen_base;
+       arg->size = dev_priv->vram_stolen_size;
+
+       return 0;
+}
+
+/* DRM .open hook: no per-client state needed */
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+       return 0;
+}
+
+/* DRM .postclose hook: no per-client state to release */
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+/*
+ * Ioctl entry point: optionally enable runtime PM on first suitable
+ * ioctl, then forward to the core drm_ioctl() dispatcher.
+ */
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+                              unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       static unsigned int runtime_allowed;
+
+       /* NOTE(review): runtime_allowed starts at 0 and nothing in this
+          function ever sets it to 1, so this branch appears to be
+          unreachable and pm_runtime_allow() is never called from here
+          — TODO confirm the intended trigger */
+       if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+               runtime_allowed++;
+               pm_runtime_allow(&dev->pdev->dev);
+               dev_priv->rpm_enabled = 1;
+       }
+       return drm_ioctl(filp, cmd, arg);
+       /* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ */
+/* DRM .preclose hook: currently no flipped-page state to clean up */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+/* PCI .remove: drop the DRM device, which triggers psb_driver_unload() */
+static void psb_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       drm_put_dev(dev);
+}
+
+/* System and runtime PM callbacks, wired into the PCI driver below */
+static const struct dev_pm_ops psb_pm_ops = {
+       .resume = gma_power_resume,
+       .suspend = gma_power_suspend,
+       .runtime_suspend = psb_runtime_suspend,
+       .runtime_resume = psb_runtime_resume,
+       .runtime_idle = psb_runtime_idle,
+};
+
+/* VMA callbacks for mmap'ed GEM objects; faults fill pages on demand */
+static struct vm_operations_struct psb_gem_vm_ops = {
+       .fault = psb_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+/* Character-device file operations for /dev/dri/cardN */
+static const struct file_operations psb_gem_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = psb_unlocked_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+};
+
+/* Top-level DRM driver description: modesetting + GEM, IRQ driven */
+static struct drm_driver driver = {
+       .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
+                          DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+       .load = psb_driver_load,
+       .unload = psb_driver_unload,
+
+       .ioctls = psb_ioctls,
+       .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+       .device_is_agp = psb_driver_device_is_agp,
+       .irq_preinstall = psb_irq_preinstall,
+       .irq_postinstall = psb_irq_postinstall,
+       .irq_uninstall = psb_irq_uninstall,
+       .irq_handler = psb_irq_handler,
+       .enable_vblank = psb_enable_vblank,
+       .disable_vblank = psb_disable_vblank,
+       .get_vblank_counter = psb_get_vblank_counter,
+       .lastclose = psb_lastclose,
+       .open = psb_driver_open,
+       .preclose = psb_driver_preclose,
+       .postclose = psb_driver_close,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+
+       .gem_init_object = psb_gem_init_object,
+       .gem_free_object = psb_gem_free_object,
+       .gem_vm_ops = &psb_gem_vm_ops,
+       .dumb_create = psb_gem_dumb_create,
+       .dumb_map_offset = psb_gem_dumb_map_gtt,
+       .dumb_destroy = psb_gem_dumb_destroy,
+       .fops = &psb_gem_fops,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = PSB_DRM_DRIVER_DATE,
+       .major = PSB_DRM_DRIVER_MAJOR,
+       .minor = PSB_DRM_DRIVER_MINOR,
+       .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+/* PCI driver glue binding the id table, probe/remove and PM ops */
+static struct pci_driver psb_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = psb_probe,
+       .remove = psb_remove,
+       .driver.pm = &psb_pm_ops,
+};
+
+/* PCI .probe: hand the device to the DRM core, which calls .load */
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+/* Module init: register the PCI driver with the DRM core */
+static int __init psb_init(void)
+{
+       return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+/* Module exit: unregister the PCI driver */
+static void __exit psb_exit(void)
+{
+       drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644 (file)
index 0000000..eb1568a
--- /dev/null
@@ -0,0 +1,956 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include "drm_global.h"
+#include "gem_glue.h"
+#include "gma_drm.h"
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "gtt.h"
+#include "power.h"
+#include "oaktrail.h"
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE        2
+
+enum {
+       CHIP_PSB_8108 = 0,              /* Poulsbo */
+       CHIP_PSB_8109 = 1,              /* Poulsbo */
+       CHIP_MRST_4100 = 2,             /* Moorestown/Oaktrail */
+       CHIP_MFLD_0130 = 3,             /* Medfield */
+};
+
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+
+/*
+ * Driver definitions
+ */
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
+
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+/*
+ *     Hardware offsets
+ */
+#define PSB_VDC_OFFSET          0x00000000
+#define PSB_VDC_SIZE            0x000080000
+#define MRST_MMIO_SIZE          0x0000C0000
+#define MDFLD_MMIO_SIZE          0x000100000
+#define PSB_SGX_SIZE            0x8000
+#define PSB_SGX_OFFSET          0x00040000
+#define MRST_SGX_OFFSET                 0x00080000
+/*
+ *     PCI resource identifiers
+ */
+#define PSB_MMIO_RESOURCE       0
+#define PSB_GATT_RESOURCE       2
+#define PSB_GTT_RESOURCE        3
+/*
+ *     PCI configuration
+ */
+#define PSB_GMCH_CTRL           0x52
+#define PSB_BSM                         0x5C
+#define _PSB_GMCH_ENABLED       0x4
+#define PSB_PGETBL_CTL          0x2020
+#define _PSB_PGETBL_ENABLED     0x00000001
+#define PSB_SGX_2D_SLAVE_PORT   0x4000
+
+/* To get rid of */
+#define PSB_TT_PRIV0_LIMIT      (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT     (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/*
+ *     SGX side MMU definitions (these can probably go)
+ */
+
+/*
+ *     Flags for external memory type field.
+ */
+#define PSB_MMU_CACHED_MEMORY    0x0001        /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY        0x0002        /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY        0x0004        /* MMU WO memory */
+/*
+ *     PTE's and PDE's
+ */
+#define PSB_PDE_MASK             0x003FFFFF
+#define PSB_PDE_SHIFT            22
+#define PSB_PTE_SHIFT            12
+/*
+ *     Cache control
+ */
+#define PSB_PTE_VALID            0x0001        /* PTE / PDE valid */
+#define PSB_PTE_WO               0x0002        /* Write only */
+#define PSB_PTE_RO               0x0004        /* Read only */
+#define PSB_PTE_CACHED           0x0008        /* CPU cache coherent */
+
+/*
+ *     VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING    0x2064
+#define PSB_TOPAZ_CLOCKGATING    0x2068
+#define PSB_HWSTAM               0x2098
+#define PSB_INSTPM               0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG    (1<<5)
+#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG    (1<<7)
+#define _MDFLD_MIPIA_FLAG        (1<<16)
+#define _MDFLD_MIPIC_FLAG        (1<<17)
+#define _PSB_IRQ_SGX_FLAG        (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG      (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG      (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG   (_PSB_VSYNC_PIPEA_FLAG | \
+                                _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits excepts the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+                                 _MDFLD_PIPEB_EVENT_FLAG | \
+                                 _PSB_PIPEA_EVENT_FLAG | \
+                                 _PSB_VSYNC_PIPEA_FLAG | \
+                                 _MDFLD_MIPIA_FLAG | \
+                                 _MDFLD_MIPIC_FLAG)
+#define PSB_INT_IDENTITY_R       0x20A4
+#define PSB_INT_MASK_R           0x20A8
+#define PSB_INT_ENABLE_R         0x20A0
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+#define GPIO_CLOCK_DIR_MASK            (1 << 0)
+#define GPIO_CLOCK_DIR_IN              (0 << 1)
+#define GPIO_CLOCK_DIR_OUT             (1 << 1)
+#define GPIO_CLOCK_VAL_MASK            (1 << 2)
+#define GPIO_CLOCK_VAL_OUT             (1 << 3)
+#define GPIO_CLOCK_VAL_IN              (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE      (1 << 5)
+#define GPIO_DATA_DIR_MASK             (1 << 8)
+#define GPIO_DATA_DIR_IN               (0 << 9)
+#define GPIO_DATA_DIR_OUT              (1 << 9)
+#define GPIO_DATA_VAL_MASK             (1 << 10)
+#define GPIO_DATA_VAL_OUT              (1 << 11)
+#define GPIO_DATA_VAL_IN               (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE       (1 << 13)
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV      0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST              1
+#define PSB_UIRQ_OOM_REPLY            2
+#define PSB_UIRQ_FIRE_TA_REPLY        3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0      (1 << 0)
+#define MDFLD_DSR_2D_3D_2      (1 << 1)
+#define MDFLD_DSR_CURSOR_0     (1 << 2)
+#define MDFLD_DSR_CURSOR_2     (1 << 3)
+#define MDFLD_DSR_OVERLAY_0    (1 << 4)
+#define MDFLD_DSR_OVERLAY_2    (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0        ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2        ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D        (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR           45
+#define MDFLD_DPU_ENABLE       (1 << 31)
+#define MDFLD_DSR_FULLSCREEN   (1 << 30)
+#define MDFLD_DSR_DELAY                (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON               1
+#define PSB_PWR_STATE_OFF              2
+
+#define PSB_PMPOLICY_NOPM              0
+#define PSB_PMPOLICY_CLOCKGATING       1
+#define PSB_PMPOLICY_POWERDOWN         2
+
+#define PSB_PMSTATE_POWERUP            0
+#define PSB_PMSTATE_CLOCKGATED         1
+#define PSB_PMSTATE_POWERDOWN          2
+#define PSB_PCIx_MSI_ADDR_LOC          0x94
+#define PSB_PCIx_MSI_DATA_LOC          0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+       struct opregion_header *header;
+       struct opregion_acpi *acpi;
+       struct opregion_swsci *swsci;
+       struct opregion_asle *asle;
+       int enabled;
+};
+
+struct sdvo_device_mapping {
+       u8 initialized;
+       u8 dvo_port;
+       u8 slave_addr;
+       u8 dvo_wiring;
+       u8 i2c_pin;
+       u8 i2c_speed;
+       u8 ddc_pin;
+};
+
+struct intel_gmbus {
+       struct i2c_adapter adapter;
+       struct i2c_adapter *force_bit;
+       u32 reg0;
+};
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE           3
+
+struct drm_psb_private {
+       struct drm_device *dev;
+       const struct psb_ops *ops;
+
+       struct psb_gtt gtt;
+
+       /* GTT Memory manager */
+       struct psb_gtt_mm *gtt_mm;
+       struct page *scratch_page;
+       u32 *gtt_map;
+       uint32_t stolen_base;
+       void *vram_addr;
+       unsigned long vram_stolen_size;
+       int gtt_initialized;
+       u16 gmch_ctrl;          /* Saved GTT setup */
+       u32 pge_ctl;
+
+       struct mutex gtt_mutex;
+       struct resource *gtt_mem;       /* Our PCI resource */
+
+       struct psb_mmu_driver *mmu;
+       struct psb_mmu_pd *pf_pd;
+
+       /*
+        * Register base
+        */
+
+       uint8_t *sgx_reg;
+       uint8_t *vdc_reg;
+       uint32_t gatt_free_offset;
+
+       /*
+        * Fencing / irq.
+        */
+
+       uint32_t vdc_irq_mask;
+       uint32_t pipestat[PSB_NUM_PIPE];
+
+       spinlock_t irqmask_lock;
+
+       /*
+        * Power
+        */
+
+       bool suspended;
+       bool display_power;
+       int display_count;
+
+       /*
+        * Modesetting
+        */
+       struct psb_intel_mode_device mode_dev;
+
+       struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+       struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+       uint32_t num_pipe;
+
+       /*
+        * OSPM info (Power management base) (can go ?)
+        */
+       uint32_t ospm_base;
+
+       /*
+        * Sizes info
+        */
+
+       u32 fuse_reg_value;
+       u32 video_device_fuse;
+
+       /* PCI revision ID for B0:D2:F0 */
+       uint8_t platform_rev_id;
+
+       /* gmbus */
+       struct intel_gmbus *gmbus;
+
+       /* Used by SDVO */
+       int crt_ddc_pin;
+       /* FIXME: The mappings should be parsed from bios but for now we can
+                 pretend there are no mappings available */
+       struct sdvo_device_mapping sdvo_mappings[2];
+       u32 hotplug_supported_mask;
+       struct drm_property *broadcast_rgb_property;
+       struct drm_property *force_audio_property;
+
+       /*
+        * LVDS info
+        */
+       int backlight_duty_cycle;       /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+       struct drm_display_mode *lfp_lvds_vbt_mode;
+       struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+       struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+       struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+       /* Feature bits from the VBIOS */
+       unsigned int int_tv_support:1;
+       unsigned int lvds_dither:1;
+       unsigned int lvds_vbt:1;
+       unsigned int int_crt_support:1;
+       unsigned int lvds_use_ssc:1;
+       int lvds_ssc_freq;
+       bool is_lvds_on;
+       bool is_mipi_on;
+       u32 mipi_ctrl_display;
+
+       unsigned int core_freq;
+       uint32_t iLVDS_enable;
+
+       /* Runtime PM state */
+       int rpm_enabled;
+
+       /* MID specific */
+       struct oaktrail_vbt vbt_data;
+       struct oaktrail_gct_data gct_data;
+
+       /* MIPI Panel type etc */
+       int panel_id;
+       bool dual_mipi;         /* dual display - DPI & DBI */
+       bool dpi_panel_on;      /* The DPI panel power is on */
+       bool dpi_panel_on2;     /* The DPI panel power is on */
+       bool dbi_panel_on;      /* The DBI panel power is on */
+       bool dbi_panel_on2;     /* The DBI panel power is on */
+       u32 dsr_fb_update;      /* DSR FB update counter */
+
+       /* Moorestown HDMI state */
+       struct oaktrail_hdmi_dev *hdmi_priv;
+
+       /* Moorestown pipe config register value cache */
+       uint32_t pipeconf;
+       uint32_t pipeconf1;
+       uint32_t pipeconf2;
+
+       /* Moorestown plane control register value cache */
+       uint32_t dspcntr;
+       uint32_t dspcntr1;
+       uint32_t dspcntr2;
+
+       /* Moorestown MM backlight cache */
+       uint8_t saveBKLTCNT;
+       uint8_t saveBKLTREQ;
+       uint8_t saveBKLTBRTL;
+
+       /*
+        * Register state
+        */
+       uint32_t saveDSPACNTR;
+       uint32_t saveDSPBCNTR;
+       uint32_t savePIPEACONF;
+       uint32_t savePIPEBCONF;
+       uint32_t savePIPEASRC;
+       uint32_t savePIPEBSRC;
+       uint32_t saveFPA0;
+       uint32_t saveFPA1;
+       uint32_t saveDPLL_A;
+       uint32_t saveDPLL_A_MD;
+       uint32_t saveHTOTAL_A;
+       uint32_t saveHBLANK_A;
+       uint32_t saveHSYNC_A;
+       uint32_t saveVTOTAL_A;
+       uint32_t saveVBLANK_A;
+       uint32_t saveVSYNC_A;
+       uint32_t saveDSPASTRIDE;
+       uint32_t saveDSPASIZE;
+       uint32_t saveDSPAPOS;
+       uint32_t saveDSPABASE;
+       uint32_t saveDSPASURF;
+       uint32_t saveDSPASTATUS;
+       uint32_t saveFPB0;
+       uint32_t saveFPB1;
+       uint32_t saveDPLL_B;
+       uint32_t saveDPLL_B_MD;
+       uint32_t saveHTOTAL_B;
+       uint32_t saveHBLANK_B;
+       uint32_t saveHSYNC_B;
+       uint32_t saveVTOTAL_B;
+       uint32_t saveVBLANK_B;
+       uint32_t saveVSYNC_B;
+       uint32_t saveDSPBSTRIDE;
+       uint32_t saveDSPBSIZE;
+       uint32_t saveDSPBPOS;
+       uint32_t saveDSPBBASE;
+       uint32_t saveDSPBSURF;
+       uint32_t saveDSPBSTATUS;
+       uint32_t saveVCLK_DIVISOR_VGA0;
+       uint32_t saveVCLK_DIVISOR_VGA1;
+       uint32_t saveVCLK_POST_DIV;
+       uint32_t saveVGACNTRL;
+       uint32_t saveADPA;
+       uint32_t saveLVDS;
+       uint32_t saveDVOA;
+       uint32_t saveDVOB;
+       uint32_t saveDVOC;
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePaletteA[256];
+       uint32_t savePaletteB[256];
+       uint32_t saveBLC_PWM_CTL2;
+       uint32_t saveBLC_PWM_CTL;
+       uint32_t saveCLOCKGATING;
+       uint32_t saveDSPARB;
+       uint32_t saveDSPATILEOFF;
+       uint32_t saveDSPBTILEOFF;
+       uint32_t saveDSPAADDR;
+       uint32_t saveDSPBADDR;
+       uint32_t savePFIT_AUTO_RATIOS;
+       uint32_t savePFIT_PGM_RATIOS;
+       uint32_t savePP_ON_DELAYS;
+       uint32_t savePP_OFF_DELAYS;
+       uint32_t savePP_DIVISOR;
+       uint32_t saveBSM;
+       uint32_t saveVBT;
+       uint32_t saveBCLRPAT_A;
+       uint32_t saveBCLRPAT_B;
+       uint32_t saveDSPALINOFF;
+       uint32_t saveDSPBLINOFF;
+       uint32_t savePERF_MODE;
+       uint32_t saveDSPFW1;
+       uint32_t saveDSPFW2;
+       uint32_t saveDSPFW3;
+       uint32_t saveDSPFW4;
+       uint32_t saveDSPFW5;
+       uint32_t saveDSPFW6;
+       uint32_t saveCHICKENBIT;
+       uint32_t saveDSPACURSOR_CTRL;
+       uint32_t saveDSPBCURSOR_CTRL;
+       uint32_t saveDSPACURSOR_BASE;
+       uint32_t saveDSPBCURSOR_BASE;
+       uint32_t saveDSPACURSOR_POS;
+       uint32_t saveDSPBCURSOR_POS;
+       uint32_t save_palette_a[256];
+       uint32_t save_palette_b[256];
+       uint32_t saveOV_OVADD;
+       uint32_t saveOV_OGAMC0;
+       uint32_t saveOV_OGAMC1;
+       uint32_t saveOV_OGAMC2;
+       uint32_t saveOV_OGAMC3;
+       uint32_t saveOV_OGAMC4;
+       uint32_t saveOV_OGAMC5;
+       uint32_t saveOVC_OVADD;
+       uint32_t saveOVC_OGAMC0;
+       uint32_t saveOVC_OGAMC1;
+       uint32_t saveOVC_OGAMC2;
+       uint32_t saveOVC_OGAMC3;
+       uint32_t saveOVC_OGAMC4;
+       uint32_t saveOVC_OGAMC5;
+
+       /* MSI reg save */
+       uint32_t msi_addr;
+       uint32_t msi_data;
+
+       /* Medfield specific register save state */
+       uint32_t saveHDMIPHYMISCCTL;
+       uint32_t saveHDMIB_CONTROL;
+       uint32_t saveDSPCCNTR;
+       uint32_t savePIPECCONF;
+       uint32_t savePIPECSRC;
+       uint32_t saveHTOTAL_C;
+       uint32_t saveHBLANK_C;
+       uint32_t saveHSYNC_C;
+       uint32_t saveVTOTAL_C;
+       uint32_t saveVBLANK_C;
+       uint32_t saveVSYNC_C;
+       uint32_t saveDSPCSTRIDE;
+       uint32_t saveDSPCSIZE;
+       uint32_t saveDSPCPOS;
+       uint32_t saveDSPCSURF;
+       uint32_t saveDSPCSTATUS;
+       uint32_t saveDSPCLINOFF;
+       uint32_t saveDSPCTILEOFF;
+       uint32_t saveDSPCCURSOR_CTRL;
+       uint32_t saveDSPCCURSOR_BASE;
+       uint32_t saveDSPCCURSOR_POS;
+       uint32_t save_palette_c[256];
+       uint32_t saveOV_OVADD_C;
+       uint32_t saveOV_OGAMC0_C;
+       uint32_t saveOV_OGAMC1_C;
+       uint32_t saveOV_OGAMC2_C;
+       uint32_t saveOV_OGAMC3_C;
+       uint32_t saveOV_OGAMC4_C;
+       uint32_t saveOV_OGAMC5_C;
+
+       /* DSI register save */
+       uint32_t saveDEVICE_READY_REG;
+       uint32_t saveINTR_EN_REG;
+       uint32_t saveDSI_FUNC_PRG_REG;
+       uint32_t saveHS_TX_TIMEOUT_REG;
+       uint32_t saveLP_RX_TIMEOUT_REG;
+       uint32_t saveTURN_AROUND_TIMEOUT_REG;
+       uint32_t saveDEVICE_RESET_REG;
+       uint32_t saveDPI_RESOLUTION_REG;
+       uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+       uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+       uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+       uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+       uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+       uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+       uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+       uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+       uint32_t saveINIT_COUNT_REG;
+       uint32_t saveMAX_RET_PAK_REG;
+       uint32_t saveVIDEO_FMT_REG;
+       uint32_t saveEOT_DISABLE_REG;
+       uint32_t saveLP_BYTECLK_REG;
+       uint32_t saveHS_LS_DBI_ENABLE_REG;
+       uint32_t saveTXCLKESC_REG;
+       uint32_t saveDPHY_PARAM_REG;
+       uint32_t saveMIPI_CONTROL_REG;
+       uint32_t saveMIPI;
+       uint32_t saveMIPI_C;
+
+       /* DPST register save */
+       uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+       uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+       uint32_t savePWM_CONTROL_LOGIC;
+
+       /*
+        * DSI info.
+        */
+       void *dbi_dsr_info;
+       void *dbi_dpu_info;
+       void *dsi_configs[2];
+       /*
+        * LID-Switch
+        */
+       spinlock_t lid_lock;
+       struct timer_list lid_timer;
+       struct psb_intel_opregion opregion;
+       u32 *lid_state;
+       u32 lid_last_state;
+
+       /*
+        * Watchdog
+        */
+
+       uint32_t apm_reg;
+       uint16_t apm_base;
+
+       /*
+        * Used for modifying backlight from
+        * xrandr -- consider removing and using HAL instead
+        */
+       struct backlight_device *backlight_device;
+       struct drm_property *backlight_property;
+       uint32_t blc_adj1;
+       uint32_t blc_adj2;
+
+       void *fbdev;
+
+       /* 2D acceleration */
+       spinlock_t lock_2d;
+};
+
+
+/*
+ *     Operations for each board type
+ */
+struct psb_ops {
+       const char *name;
+       unsigned int accel_2d:1;
+       int pipes;              /* Number of output pipes */
+       int crtcs;              /* Number of CRTCs */
+       int sgx_offset;         /* Base offset of SGX device */
+
+       /* Sub functions */
+       struct drm_crtc_helper_funcs const *crtc_helper;
+       struct drm_crtc_funcs const *crtc_funcs;
+
+       /* Setup hooks */
+       int (*chip_setup)(struct drm_device *dev);
+       void (*chip_teardown)(struct drm_device *dev);
+
+       /* Display management hooks */
+       int (*output_init)(struct drm_device *dev);
+       /* Power management hooks */
+       void (*init_pm)(struct drm_device *dev);
+       int (*save_regs)(struct drm_device *dev);
+       int (*restore_regs)(struct drm_device *dev);
+       int (*power_up)(struct drm_device *dev);
+       int (*power_down)(struct drm_device *dev);
+
+       void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       /* Backlight */
+       int (*backlight_init)(struct drm_device *dev);
+#endif
+       int i2c_bus;            /* I2C bus identifier for Moorestown */
+};
+
+
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+       return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ * MMU stuff.
+ */
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                       int trap_pagefaults,
+                                       int invalid_type,
+                                       struct drm_psb_private *dev_priv);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+                                                *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+                              uint32_t gtt_start, uint32_t gtt_pages);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                          int trap_pagefaults,
+                                          int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                       unsigned long address,
+                                       uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+                                      uint32_t start_pfn,
+                                      unsigned long address,
+                                      uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                                 unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+                                unsigned long address, uint32_t num_pages,
+                                uint32_t desired_tile_stride,
+                                uint32_t hw_tile_stride);
+/*
+ *psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/*
+ * intel_opregion.c
+ */
+extern int gma_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_exit(struct drm_device *dev);
+
+/*
+ * framebuffer.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+                       struct drm_framebuffer *fb);
+/*
+ * accel_2d.c
+ */
+extern void psbfb_copyarea(struct fb_info *info,
+                                       const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+                                       psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern int psb_gem_init_object(struct drm_gem_object *obj);
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+                       struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+                       struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/*
+ * Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_ENTRY   (1 << 3)
+/* debug the get H/V BP/FP count */
+#define PSB_D_HV      (1 << 4)
+#define PSB_D_DBI_BF  (1 << 5)
+#define PSB_D_PM      (1 << 6)
+#define PSB_D_RENDER  (1 << 7)
+#define PSB_D_REG     (1 << 8)
+#define PSB_D_MSVDX   (1 << 9)
+#define PSB_D_TOPAZ   (1 << 10)
+
+extern int drm_psb_no_fb;
+extern int drm_idle_check_interval;
+
+/*
+ *     Utilities
+ */
+
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return ioread32(dev_priv->vdc_reg + reg);
+}
+
+#define REG_READ(reg)         REGISTER_READ(dev, (reg))
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+                                     uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val)    REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+                                       uint32_t reg, uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val)    REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+                                      uint32_t reg, uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val)           REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_WVDC32(_val, _offs)                iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs)              ioread32(dev_priv->vdc_reg + (_offs))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs)                                              \
+({                                                                     \
+       if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {              \
+               printk(KERN_ERR                                         \
+                       "access sgx when it's off!! (READ) %s, %d\n",   \
+              __FILE__, __LINE__);                                     \
+               mdelay(1000);                                           \
+       }                                                               \
+       ioread32(dev_priv->sgx_reg + (_offs));                          \
+})
+#else
+#define PSB_RSGX32(_offs)              ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs)                iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs)      iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs)            ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644 (file)
index 0000000..49e9835
--- /dev/null
@@ -0,0 +1,1446 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
/* One candidate PLL configuration: the raw divisors chosen by the
 * search plus the values derived from them by i8xx_clock/i9xx_clock. */
struct psb_intel_clock_t {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
};

/* Inclusive [min, max] range for a single PLL divisor. */
struct psb_intel_range_t {
	int min, max;
};

/* Post divider selection: below dot_limit use p2_slow, else p2_fast. */
struct psb_intel_p2_t {
	int dot_limit;
	int p2_slow, p2_fast;
};

#define INTEL_P2_NUM		      2

/* Full set of divisor ranges for one output class (see psb_intel_limits). */
struct psb_intel_limit_t {
	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
	struct psb_intel_p2_t p2;
};
+
/* PLL divisor limits for i8xx-class pipes (kHz for DOT/VCO values). */
#define I8XX_DOT_MIN		  25000
#define I8XX_DOT_MAX		 350000
#define I8XX_VCO_MIN		 930000
#define I8XX_VCO_MAX		1400000
#define I8XX_N_MIN		      3
#define I8XX_N_MAX		     16
#define I8XX_M_MIN		     96
#define I8XX_M_MAX		    140
#define I8XX_M1_MIN		     18
#define I8XX_M1_MAX		     26
#define I8XX_M2_MIN		      6
#define I8XX_M2_MAX		     16
#define I8XX_P_MIN		      4
#define I8XX_P_MAX		    128
#define I8XX_P1_MIN		      2
#define I8XX_P1_MAX		     33
#define I8XX_P1_LVDS_MIN	      1
#define I8XX_P1_LVDS_MAX	      6
#define I8XX_P2_SLOW		      4
#define I8XX_P2_FAST		      2
#define I8XX_P2_LVDS_SLOW	      14
#define I8XX_P2_LVDS_FAST	      14	/* No fast option */
#define I8XX_P2_SLOW_LIMIT	 165000

/* PLL divisor limits for i9xx-class pipes (the ones PSB actually uses). */
#define I9XX_DOT_MIN		  20000
#define I9XX_DOT_MAX		 400000
#define I9XX_VCO_MIN		1400000
#define I9XX_VCO_MAX		2800000
#define I9XX_N_MIN		      3
#define I9XX_N_MAX		      8
#define I9XX_M_MIN		     70
#define I9XX_M_MAX		    120
#define I9XX_M1_MIN		     10
#define I9XX_M1_MAX		     20
#define I9XX_M2_MIN		      5
#define I9XX_M2_MAX		      9
#define I9XX_P_SDVO_DAC_MIN	      5
#define I9XX_P_SDVO_DAC_MAX	     80
#define I9XX_P_LVDS_MIN		      7
#define I9XX_P_LVDS_MAX		     98
#define I9XX_P1_MIN		      1
#define I9XX_P1_MAX		      8
#define I9XX_P2_SDVO_DAC_SLOW		    10
#define I9XX_P2_SDVO_DAC_FAST		     5
#define I9XX_P2_SDVO_DAC_SLOW_LIMIT	200000
#define I9XX_P2_LVDS_SLOW		    14
#define I9XX_P2_LVDS_FAST		     7
#define I9XX_P2_LVDS_SLOW_LIMIT		112000

/* Indices into psb_intel_limits[] */
#define INTEL_LIMIT_I8XX_DVO_DAC    0
#define INTEL_LIMIT_I8XX_LVDS	   1
#define INTEL_LIMIT_I9XX_SDVO_DAC   2
#define INTEL_LIMIT_I9XX_LVDS	   3
+
/* Divisor-range tables, one per output class; indexed by the
 * INTEL_LIMIT_* constants and selected in psb_intel_limit(). */
static const struct psb_intel_limit_t psb_intel_limits[] = {
	{			/* INTEL_LIMIT_I8XX_DVO_DAC */
	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
	 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
	 },
	{			/* INTEL_LIMIT_I8XX_LVDS */
	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
	 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
	 },
	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
	 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
	 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
		I9XX_P2_SDVO_DAC_FAST},
	 },
	{			/* INTEL_LIMIT_I9XX_LVDS */
	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
	 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
	 /* The single-channel range is 25-112Mhz, and dual-channel
	  * is 80-224Mhz.  Prefer single channel as much as possible.
	  */
	 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
	 },
};
+
+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+{
+       const struct psb_intel_limit_t *limit;
+
+       if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+               limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+       else
+               limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
/*
 * Compute derived clock values for the PSB display PLL.  PSB always
 * uses the 9xx-style formula; @dev is accepted for interface symmetry
 * but not consulted.
 *
 * Fix: the original wrote "return i9xx_clock(...)" — a return
 * statement with an expression is not permitted in a void function
 * (C99 6.8.6.4), even when the callee also returns void.
 */
static void psb_intel_clock(struct drm_device *dev, int refclk,
			struct psb_intel_clock_t *clock)
{
	i9xx_clock(refclk, clock);
}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	/* Walk every connector; a connector whose encoder currently
	 * drives this crtc and matches the requested type is a hit. */
	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(l_entry);
			if (psb_intel_encoder->type == type)
				return true;
		}
	}
	return false;
}
+
/* NOTE: this macro expands to a "return false" in the caller — every
 * use site below bails out of psb_intel_PLL_is_valid immediately. */
#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
			       struct psb_intel_clock_t *clock)
{
	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);

	/* Check every raw divisor and every derived value against the
	 * limit table selected for this crtc's output type. */
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 must strictly exceed m2 for a valid configuration */
	if (clock->m1 <= clock->m2)
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
				int refclk,
				struct psb_intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_clock_t clock;
	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
	/* err tracks the best |dot - target| seen so far; starting it at
	 * target means any valid candidate within range improves on it. */
	int err = target;

	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 from the dot-clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* Exhaustive search over m1/m2/n/p1 within the limit ranges,
	 * keeping the candidate whose dot clock is closest to target. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
		     clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					psb_intel_clock(dev, refclk, &clock);

					if (!psb_intel_PLL_is_valid
					    (crtc, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate beat the initial error */
	return err != target;
}
+
/*
 * Crude vblank wait: no interrupt plumbing here, so just busy-delay
 * one full frame at the slowest refresh we expect (50Hz => 20ms).
 */
void psb_intel_wait_for_vblank(struct drm_device *dev)
{
	mdelay(20);
}
+
/*
 * Point the display plane at the framebuffer bound to @crtc, panning to
 * (@x, @y).  Pins the new fb into the GTT, programs stride/format/base,
 * and unpins @old_fb on success paths that reach the cleanup label.
 * Returns 0 on success or a negative errno.
 */
int psb_intel_pipe_set_base(struct drm_crtc *crtc,
			    int x, int y, struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_i915_master_private *master_priv; */
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
	int pipe = psb_intel_crtc->pipe;
	unsigned long start, offset;
	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	u32 dspcntr;
	int ret = 0;

	/* Need the device powered up to touch registers; if we can't,
	 * report success without doing anything. */
	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!crtc->fb) {
		dev_dbg(dev->dev, "No FB bound\n");
		goto psb_intel_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto psb_intel_pipe_set_base_exit;
	start = psbfb->gtt->offset;

	/* Byte offset of the (x, y) pixel within the framebuffer */
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);

	REG_WRITE(dspstride, crtc->fb->pitches[0]);

	dspcntr = REG_READ(dspcntr_reg);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	/* Translate fb depth into the plane's pixel-format field */
	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		psb_gtt_unpin(psbfb->gtt);
		goto psb_intel_pipe_set_base_exit;
	}
	REG_WRITE(dspcntr_reg, dspcntr);


	if (0 /* FIXMEAC - check what PSB needs */) {
		REG_WRITE(dspbase, offset);
		REG_READ(dspbase);
		REG_WRITE(dspsurf, start);
		REG_READ(dspsurf);
	} else {
		/* PSB path: base register carries GTT start + pan offset */
		REG_WRITE(dspbase, start + offset);
		REG_READ(dspbase);
	}

psb_intel_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

psb_intel_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_i915_master_private *master_priv; */
+       /* struct drm_i915_private *dev_priv = dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+
+               /* Wait for vblank for the disable to take effect. */
+               psb_intel_wait_for_vblank(dev);
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+       /*Set FIFO Watermarks*/
+       REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of prepare see psb_intel_lvds_prepare */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of commit see psb_intel_lvds_commit */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
/* Tear down the DRM side of the encoder, then free our wrapper. */
void psb_intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct psb_intel_encoder *psb_encoder = to_psb_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(psb_encoder);
}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       /* Must be on PIPE 1 for PSB */
+       return 1;
+}
+
/*
 * Program the full mode on a pipe: pick PLL divisors, configure the
 * DPLL/FP registers, set all timing registers from @adjusted_mode, and
 * flush the plane via mode_set_base.  Returns 0.
 *
 * NOTE(review): is_crt and is_dvo are set but never consulted below;
 * and a PLL-search failure logs an error yet still returns 0 — confirm
 * callers do not expect a negative errno there.
 */
static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	int pipe = psb_intel_crtc->pipe;
	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
	int refclk;
	struct psb_intel_clock_t clock;
	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
	bool ok, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* No scan out no play */
	if (crtc->fb == NULL) {
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
		return 0;
	}

	/* Classify the outputs currently attached to this crtc */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(connector);

		if (!connector->encoder
		    || connector->encoder->crtc != crtc)
			continue;

		switch (psb_intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
			is_sdvo = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		}
	}

	/* 96 MHz reference clock, in kHz */
	refclk = 96000;

	ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
				 &clock);
	if (!ok) {
		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
		return 0;
	}

	/* Pack n/m1/m2 divisors into the FP register layout */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;

	dpll = DPLL_VGA_MODE_DIS;
	if (is_lvds) {
		dpll |= DPLLB_MODE_LVDS;
		dpll |= DPLL_DVO_HIGH_SPEED;
	} else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int sdvo_pixel_multiply =
			    adjusted_mode->clock / mode->clock;
		dpll |= DPLL_DVO_HIGH_SPEED;
		dpll |=
		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << 16;
	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_tv) {
		/* XXX: just matching BIOS for now */
/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	}
	dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = REG_READ(pipeconf_reg);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr |= DISPPLANE_SEL_PIPE_A;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	dspcntr |= DISPLAY_PLANE_ENABLE;
	pipeconf |= PIPEACONF_ENABLE;
	dpll |= DPLL_VCO_ENABLE;


	/* Disable the panel fitter if it was on our pipe */
	if (psb_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);

	drm_mode_debug_printmodeline(mode);

	/* Program FP, then the DPLL with VCO still disabled, and let
	 * it settle before enabling */
	if (dpll & DPLL_VCO_ENABLE) {
		REG_WRITE(fp_reg, fp);
		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
		REG_READ(dpll_reg);
		udelay(150);
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		u32 lvds = REG_READ(LVDS);

		lvds &= ~LVDS_PIPEB_SELECT;
		if (pipe == 1)
			lvds |= LVDS_PIPEB_SELECT;

		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		/* Set the B0-B3 data pairs corresponding to
		 * whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
		if (clock.p2 == 7)
			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more
		 * thoroughly into how panels behave in the two modes.
		 */

		REG_WRITE(LVDS, lvds);
		REG_READ(LVDS);
	}

	REG_WRITE(fp_reg, fp);
	REG_WRITE(dpll_reg, dpll);
	REG_READ(dpll_reg);
	/* Wait for the clocks to stabilize. */
	udelay(150);

	/* write it again -- the BIOS does, after all */
	REG_WRITE(dpll_reg, dpll);

	REG_READ(dpll_reg);
	/* Wait for the clocks to stabilize. */
	udelay(150);

	/* Timing registers take (value - 1), active in the low half and
	 * total/end in the high half */
	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
		  ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
		  ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	REG_WRITE(dspsize_reg,
		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(dsppos_reg, 0);
	REG_WRITE(pipesrc_reg,
		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	psb_intel_wait_for_vblank(dev);

	REG_WRITE(dspcntr_reg, dspcntr);

	/* Flush the plane changes */
	crtc_funcs->mode_set_base(crtc, x, y, old_fb);

	psb_intel_wait_for_vblank(dev);

	return 0;
}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv =
				(struct drm_psb_private *)dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int palreg = PALETTE_A;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* Select the palette register block for this crtc's pipe */
	switch (psb_intel_crtc->pipe) {
	case 0:
		break;
	case 1:
		palreg = PALETTE_B;
		break;
	case 2:
		palreg = PALETTE_C;
		break;
	default:
		dev_err(dev->dev, "Illegal Pipe Number.\n");
		return;
	}

	if (gma_power_begin(dev, false)) {
		/* Hardware reachable: write 256 R/G/B entries, each
		 * biased by the per-entry adjustment table */
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((psb_intel_crtc->lut_r[i] +
				  psb_intel_crtc->lut_adj[i]) << 16) |
				  ((psb_intel_crtc->lut_g[i] +
				  psb_intel_crtc->lut_adj[i]) << 8) |
				  (psb_intel_crtc->lut_b[i] +
				  psb_intel_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		/* Powered down: stash the values for later restore.
		 * NOTE(review): this always writes save_palette_a even
		 * for pipe B/C — confirm whether per-pipe save areas
		 * are needed. */
		for (i = 0; i < 256; i++) {
			dev_priv->save_palette_a[i] =
				  ((psb_intel_crtc->lut_r[i] +
				  psb_intel_crtc->lut_adj[i]) << 16) |
				  ((psb_intel_crtc->lut_g[i] +
				  psb_intel_crtc->lut_adj[i]) << 8) |
				  (psb_intel_crtc->lut_b[i] +
				  psb_intel_crtc->lut_adj[i]);
		}

	}
}
+
+/**
+ * Save HW states of the given crtc
+ *
+ * Snapshots the pipe timing, PLL, plane and palette registers into
+ * psb_intel_crtc->crtc_state so psb_intel_crtc_restore() can replay them.
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       /* NOTE(review): only pipes A and B are handled; pipe 2 would wrongly
+          use the pipe B registers here — confirm callers never pass it. */
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_err(dev->dev, "No CRTC state found\n");
+               return;
+       }
+
+       crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+       crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+       crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+       crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+       crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+       crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+       crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+       crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+       crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+       crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+       crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+       crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+       crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+       /*NOTE: DSPSIZE DSPPOS only for psb*/
+       crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+       crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+       crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+       /* Snapshot the full 256-entry palette for this pipe */
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ *
+ * Replays the register snapshot taken by psb_intel_crtc_save(), in the
+ * hardware-required order: PLL disabled first, dividers programmed, PLL
+ * re-enabled with settle delays, then timings, planes and palette.
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private * dev_priv =
+                               (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_err(dev->dev, "No crtc state\n");
+               return;
+       }
+
+       /* If the DPLL was running, disable it before touching the dividers */
+       if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+               REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+                       crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+               REG_READ(pipeA ? DPLL_A : DPLL_B);
+               /* posting read + delay let the VCO spin down */
+               udelay(150);
+       }
+
+       REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+       REG_READ(pipeA ? FPA0 : FPB0);
+
+       REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+       REG_READ(pipeA ? FPA1 : FPB1);
+
+       /* Re-enable the DPLL and give it time to lock */
+       REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+       REG_READ(pipeA ? DPLL_A : DPLL_B);
+       udelay(150);
+
+       REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+       REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+       REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+       REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+       REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+       REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+       REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+       REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+       REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+       REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+       REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+       psb_intel_wait_for_vblank(dev);
+
+       /* Plane control/base last, after the pipe is up again */
+       REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+       psb_intel_wait_for_vblank(dev);
+
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+/*
+ * Set (or clear, when handle == 0) the 64x64 ARGB hardware cursor of this
+ * CRTC from the GEM object named by @handle.  Takes over the reference
+ * returned by drm_gem_object_lookup() and the GTT pin for as long as the
+ * object is the active cursor; both are released on the next cursor change.
+ * Returns 0 or a negative errno.
+ */
+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+       uint32_t temp;
+       size_t addr = 0;
+       struct gtt_range *gt;
+       struct drm_gem_object *obj;
+       int ret;
+
+       /* If we want to turn off the cursor ignore width and height */
+       if (!handle) {
+               /* turn off the cursor */
+               temp = CURSOR_MODE_DISABLE;
+
+               if (gma_power_begin(dev, false)) {
+                       REG_WRITE(control, temp);
+                       REG_WRITE(base, 0);
+                       gma_power_end(dev);
+               }
+
+               /* Unpin and drop our reference to the old GEM object */
+               if (psb_intel_crtc->cursor_obj) {
+                       gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+                       psb_gtt_unpin(gt);
+                       drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+                       psb_intel_crtc->cursor_obj = NULL;
+               }
+
+               return 0;
+       }
+
+       /* Currently we only support 64x64 cursors */
+       if (width != 64 || height != 64) {
+               dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj)
+               return -ENOENT;
+
+       if (obj->size < width * height * 4) {
+               dev_dbg(dev->dev, "buffer is too small\n");
+               /* Drop the reference taken by drm_gem_object_lookup() */
+               drm_gem_object_unreference(obj);
+               return -ENOMEM;
+       }
+
+       gt = container_of(obj, struct gtt_range, gem);
+
+       /* Pin the memory into the GTT */
+       ret = psb_gtt_pin(gt);
+       if (ret) {
+               dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+               /* Drop the lookup reference: we do not keep this object */
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       addr = gt->offset;      /* Or resource.start ??? */
+
+       psb_intel_crtc->cursor_addr = addr;
+
+       /* 64x64 ARGB cursor with gamma, routed to this pipe */
+       temp = 0;
+       temp |= (pipe << 28);
+       temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(control, temp);
+               REG_WRITE(base, addr);
+               gma_power_end(dev);
+       }
+
+       /* Unpin/release the old cursor bo, then always remember the new one
+        * so its pin and reference can be released on the next change (the
+        * old code only stored obj when a previous cursor existed, leaking
+        * the very first cursor object forever). */
+       if (psb_intel_crtc->cursor_obj) {
+               gt = container_of(psb_intel_crtc->cursor_obj,
+                                               struct gtt_range, gem);
+               psb_gtt_unpin(gt);
+               drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+       }
+       psb_intel_crtc->cursor_obj = obj;
+       return 0;
+}
+
+/* Move the hardware cursor of this CRTC to (x, y); negative coordinates
+ * are encoded in the hardware's sign-magnitude position format. */
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t pos = 0;
+
+       if (x < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+               x = -x;
+       }
+       if (y < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+               y = -y;
+       }
+       pos |= (x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
+       pos |= (y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;
+
+       /* Only touch the registers while the display island is powered */
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, pos);
+               /* Rewriting the base register arms the position update */
+               REG_WRITE((pipe == 0) ? CURABASE : CURBBASE,
+                         psb_intel_crtc->cursor_addr);
+               gma_power_end(dev);
+       }
+       return 0;
+}
+
+/* DRM gamma_set hook: copy the high byte of each 16-bit component into
+ * the CRTC's 8-bit LUT tables, then push them to the hardware. */
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t type, uint32_t size)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       u8 *lr = psb_intel_crtc->lut_r;
+       u8 *lg = psb_intel_crtc->lut_g;
+       u8 *lb = psb_intel_crtc->lut_b;
+       int idx;
+
+       /* The hardware LUT has exactly 256 entries */
+       if (size != 256)
+               return;
+
+       for (idx = 0; idx < 256; idx++) {
+               lr[idx] = red[idx] >> 8;
+               lg[idx] = green[idx] >> 8;
+               lb[idx] = blue[idx] >> 8;
+       }
+
+       psb_intel_crtc_load_lut(crtc);
+}
+
+/* DRM set_config hook: wrap the helper with runtime-PM forbid/allow so
+ * the device cannot runtime-suspend while the mode set is in flight. */
+static int psb_crtc_set_config(struct drm_mode_set *set)
+{
+       struct drm_device *dev = set->crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       bool hold_rpm = dev_priv->rpm_enabled;
+       int ret;
+
+       if (hold_rpm)
+               pm_runtime_forbid(&dev->pdev->dev);
+       ret = drm_crtc_helper_set_config(set);
+       if (hold_rpm)
+               pm_runtime_allow(&dev->pdev->dev);
+       return ret;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe,
+ * in kHz (clock.dot as computed by i8xx_clock()).  Reads the DPLL and FP
+ * divider registers live when the display island is powered, otherwise
+ * falls back to the values saved in dev_priv. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+                               struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       u32 dpll;
+       u32 fp;
+       struct psb_intel_clock_t clock;
+       bool is_lvds;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+               /* DPLL selects which FP divider register is active */
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+               else
+                       fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+               is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+               gma_power_end(dev);
+       } else {
+               /* Powered down: use the register snapshot instead */
+               dpll = (pipe == 0) ?
+                       dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA0 :
+                               dev_priv->saveFPB0;
+               else
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA1 :
+                               dev_priv->saveFPB1;
+
+               is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+       }
+
+       /* Decode the feedback (m1/m2) and reference (n) dividers */
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+       if (is_lvds) {
+               clock.p1 =
+                   ffs((dpll &
+                        DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                       DPLL_FPA01_P1_POST_DIV_SHIFT);
+               clock.p2 = 14;
+
+               if ((dpll & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       /* XXX: might not be 66MHz */
+                       i8xx_clock(66000, &clock);
+               } else
+                       i8xx_clock(48000, &clock);
+       } else {
+               if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                       clock.p1 = 2;
+               else {
+                       clock.p1 =
+                           ((dpll &
+                             DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+               }
+               if (dpll & PLL_P2_DIVIDE_BY_4)
+                       clock.p2 = 4;
+               else
+                       clock.p2 = 2;
+
+               i8xx_clock(48000, &clock);
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config connector
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe, decoded from
+ *  the live timing registers (or the saved copies when powered down).
+ *  Returns a kzalloc'd drm_display_mode, or NULL on allocation failure.
+ *  NOTE(review): the mode is allocated with kzalloc rather than
+ *  drm_mode_create(), so mode->base is unset — confirm callers expect
+ *  that and free it accordingly. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot;
+       int hsync;
+       int vtot;
+       int vsync;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+               hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+               vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+               vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+               gma_power_end(dev);
+       } else {
+               /* Powered down: fall back to the saved register values */
+               htot = (pipe == 0) ?
+                       dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+               hsync = (pipe == 0) ?
+                       dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+               vtot = (pipe == 0) ?
+                       dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+               vsync = (pipe == 0) ?
+                       dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+       }
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       /* Registers store (value - 1): active in the low word, total/end
+          in the high word */
+       mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+/* DRM destroy hook: release the pinned cursor object (if any), the saved
+ * register state, and finally the CRTC wrapper itself. */
+void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct drm_gem_object *cursor = psb_intel_crtc->cursor_obj;
+
+       if (cursor) {
+               psb_gtt_unpin(container_of(cursor, struct gtt_range, gem));
+               drm_gem_object_unreference(cursor);
+               psb_intel_crtc->cursor_obj = NULL;
+       }
+       kfree(psb_intel_crtc->crtc_state);
+       drm_crtc_cleanup(crtc);
+       kfree(psb_intel_crtc);
+}
+
+/* CRTC helper vtable: mode-setting entry points used by the DRM helpers */
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+       .dpms = psb_intel_crtc_dpms,
+       .mode_fixup = psb_intel_crtc_mode_fixup,
+       .mode_set = psb_intel_crtc_mode_set,
+       .mode_set_base = psb_intel_pipe_set_base,
+       .prepare = psb_intel_crtc_prepare,
+       .commit = psb_intel_crtc_commit,
+};
+
+/* CRTC core vtable: save/restore, cursor, gamma and config operations */
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+       .save = psb_intel_crtc_save,
+       .restore = psb_intel_crtc_restore,
+       .cursor_set = psb_intel_crtc_cursor_set,
+       .cursor_move = psb_intel_crtc_cursor_move,
+       .gamma_set = psb_intel_crtc_gamma_set,
+       .set_config = psb_crtc_set_config,
+       .destroy = psb_intel_crtc_destroy,
+};
+
+/*
+ * Set the default value of cursor control and base register
+ * to zero. This is a workaround for h/w defect on Oaktrail.
+ * NOTE(review): pipe indexes the 3-entry tables without a bounds check —
+ * callers must pass 0, 1 or 2.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+       u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+       u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+
+       REG_WRITE(control[pipe], 0);
+       REG_WRITE(base[pipe], 0);
+}
+
+/*
+ * Allocate and register the CRTC wrapper for one pipe: sets up the gamma
+ * tables, the chip-specific helper/funcs vtables, the pipe/plane-to-crtc
+ * mapping tables and the fbdev connector array.  Fails silently (no
+ * CRTC registered) if memory cannot be allocated.
+ */
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+                    struct psb_intel_mode_device *mode_dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_crtc *psb_intel_crtc;
+       int i;
+       uint16_t *r_base, *g_base, *b_base;
+
+       /* We allocate a extra array of drm_connector pointers
+        * for fbdev after the crtc */
+       psb_intel_crtc =
+           kzalloc(sizeof(struct psb_intel_crtc) +
+                   (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+                   GFP_KERNEL);
+       if (psb_intel_crtc == NULL)
+               return;
+
+       psb_intel_crtc->crtc_state =
+               kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+       if (!psb_intel_crtc->crtc_state) {
+               dev_err(dev->dev, "Crtc state error: No memory\n");
+               kfree(psb_intel_crtc);
+               return;
+       }
+
+       /* Set the CRTC operations from the chip specific data */
+       drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+       psb_intel_crtc->pipe = pipe;
+       psb_intel_crtc->plane = pipe;
+
+       /* Initialise both the DRM gamma store and our LUT to identity */
+       r_base = psb_intel_crtc->base.gamma_store;
+       g_base = r_base + 256;
+       b_base = g_base + 256;
+       for (i = 0; i < 256; i++) {
+               psb_intel_crtc->lut_r[i] = i;
+               psb_intel_crtc->lut_g[i] = i;
+               psb_intel_crtc->lut_b[i] = i;
+               r_base[i] = i << 8;
+               g_base[i] = i << 8;
+               b_base[i] = i << 8;
+
+               psb_intel_crtc->lut_adj[i] = 0;
+       }
+
+       psb_intel_crtc->mode_dev = mode_dev;
+       psb_intel_crtc->cursor_addr = 0;
+
+       drm_crtc_helper_add(&psb_intel_crtc->base,
+                                               dev_priv->ops->crtc_helper);
+
+       /* Setup the array of drm_connector pointer array */
+       psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+       /* Each plane/pipe may only be claimed once */
+       BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+              dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+                                                       &psb_intel_crtc->base;
+       dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+                                                       &psb_intel_crtc->base;
+       /* Connector pointers live in the over-allocation after the struct */
+       psb_intel_crtc->mode_set.connectors =
+           (struct drm_connector **) (psb_intel_crtc + 1);
+       psb_intel_crtc->mode_set.num_connectors = 0;
+       psb_intel_cursor_init(dev, pipe);
+}
+
+/*
+ * Ioctl handler: translate a userspace CRTC id into its hardware pipe
+ * number, returned through the arg structure.  Returns 0 on success or
+ * -EINVAL if the driver is uninitialised or the CRTC id is unknown.
+ */
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+       struct drm_mode_object *drmmode_obj;
+       struct psb_intel_crtc *crtc;
+
+       if (!dev_priv) {
+               dev_err(dev->dev, "called with no initialization\n");
+               return -EINVAL;
+       }
+
+       drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+                       DRM_MODE_OBJECT_CRTC);
+
+       if (!drmmode_obj) {
+               dev_err(dev->dev, "no such CRTC id\n");
+               return -EINVAL;
+       }
+
+       crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+       pipe_from_crtc_id->pipe = crtc->pipe;
+
+       return 0;
+}
+
+/*
+ * Find the CRTC driving the given hardware pipe.  Returns NULL when no
+ * registered CRTC matches.  (The previous version broke out of
+ * list_for_each_entry and, when nothing matched, returned the iterator
+ * left pointing at the container_of the list head — a garbage non-NULL
+ * pointer that defeated its own "crtc = NULL" initialisation.)
+ */
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (to_psb_intel_crtc(crtc)->pipe == pipe)
+                       return crtc;
+       }
+       return NULL;
+}
+
+/* Build a bitmask of connector-list positions whose attached encoder
+ * type is selected by @type_mask (bit n set => nth connector matches). */
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+       struct drm_connector *connector;
+       int entry = 0;
+       int index_mask = 0;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               struct psb_intel_encoder *enc =
+                                       psb_intel_attached_encoder(connector);
+
+               if (type_mask & (1 << enc->type))
+                       index_mask |= 1 << entry;
+               entry++;
+       }
+       return index_mask;
+}
+
+
+/* Tear down all mode-setting state; thin wrapper over the DRM core */
+void psb_intel_modeset_cleanup(struct drm_device *dev)
+{
+       drm_mode_config_cleanup(dev);
+}
+
+
+/* The GMA500 stack keeps a fixed 1:1 connector/encoder pairing, so the
+   "best" encoder is always the one already attached to the connector. */
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+       return &psb_intel_attached_encoder(connector)->base;
+}
+
+/* Record the connector/encoder pairing locally and register it with the
+ * DRM core so the helpers can route mode-set calls. */
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+                                       struct psb_intel_encoder *encoder)
+{
+       connector->encoder = encoder;
+       drm_mode_connector_attach_encoder(&connector->base, &encoder->base);
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644 (file)
index 0000000..535b49a
--- /dev/null
@@ -0,0 +1,28 @@
+/* copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t type, uint32_t size);
+void psb_intel_crtc_destroy(struct drm_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644 (file)
index 0000000..f40535e
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* these are outputs from the chip - integrated only
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+/* Scale the dot clock by @multiplier and stash the multiplier in
+ * private_flags so psb_intel_mode_get_pixel_multiplier() can recover it.
+ * The stored field is cleared first and the new value masked, so repeated
+ * calls do not OR stale bits together and an out-of-range multiplier
+ * cannot corrupt unrelated private flags (the old code did a bare |=). */
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+                               int multiplier)
+{
+       mode->clock *= multiplier;
+       mode->private_flags &= ~INTEL_MODE_PIXEL_MULTIPLIER_MASK;
+       mode->private_flags |=
+               (multiplier << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) &
+               INTEL_MODE_PIXEL_MULTIPLIER_MASK;
+}
+
+/* Recover the pixel multiplier previously stored in mode->private_flags
+ * by psb_intel_mode_set_pixel_multiplier(). */
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+       return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+              >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
+/*
+ * Holds information usually kept in the device driver's private data,
+ * since it needs to be shared across multiple drivers' privates.
+ */
+struct psb_intel_mode_device {
+
+       /*
+        * Abstracted memory manager operations
+        */
+        size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+       /*
+        * Cursor (Can go ?)
+        */
+       int cursor_needs_physical;
+
+       /*
+        * LVDS info
+        */
+       int backlight_duty_cycle;       /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+       struct drm_display_mode *panel_fixed_mode2;
+       struct drm_display_mode *vbt_mode;      /* if any */
+
+       /* PWM control register value saved for backlight restore */
+       uint32_t saveBLC_PWM_CTL;
+};
+
+/* One bit-banged i2c channel driven through a GPIO register */
+struct psb_intel_i2c_chan {
+       /* for getting at dev. private (mmio etc.) */
+       struct drm_device *drm_dev;
+       u32 reg;                /* GPIO reg */
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       u8 slave_addr;
+};
+
+/* Driver wrapper around drm_encoder carrying output type, cloning info
+ * and per-output private data */
+struct psb_intel_encoder {
+       struct drm_encoder base;
+       int type;               /* INTEL_OUTPUT_* */
+       bool needs_tv_clock;
+       void (*hot_plug)(struct psb_intel_encoder *);
+       int crtc_mask;
+       int clone_mask;         /* INTEL_*_CLONE_BIT mask */
+       void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+       /* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
+          own set of output privates */
+       struct psb_intel_i2c_chan *i2c_bus;
+       struct psb_intel_i2c_chan *ddc_bus;
+};
+
+/* Driver wrapper around drm_connector; caches the attached encoder */
+struct psb_intel_connector {
+       struct drm_connector base;
+       struct psb_intel_encoder *encoder;
+};
+
+/* Register snapshot filled by psb_intel_crtc_save() and replayed by
+ * psb_intel_crtc_restore() */
+struct psb_intel_crtc_state {
+       uint32_t saveDSPCNTR;
+       uint32_t savePIPECONF;
+       uint32_t savePIPESRC;
+       uint32_t saveDPLL;
+       uint32_t saveFP0;
+       uint32_t saveFP1;
+       uint32_t saveHTOTAL;
+       uint32_t saveHBLANK;
+       uint32_t saveHSYNC;
+       uint32_t saveVTOTAL;
+       uint32_t saveVBLANK;
+       uint32_t saveVSYNC;
+       uint32_t saveDSPSTRIDE;
+       uint32_t saveDSPSIZE;
+       uint32_t saveDSPPOS;
+       uint32_t saveDSPBASE;
+       uint32_t savePalette[256];
+};
+
+/* Driver wrapper around drm_crtc: pipe/plane assignment, gamma LUT,
+ * cursor object and saved register state */
+struct psb_intel_crtc {
+       struct drm_crtc base;
+       int pipe;
+       int plane;
+       uint32_t cursor_addr;   /* GTT offset of the pinned cursor bo */
+       u8 lut_r[256], lut_g[256], lut_b[256];
+       u8 lut_adj[256];        /* per-entry brightness adjustment */
+       struct psb_intel_framebuffer *fbdev_fb;
+       /* a mode_set for fbdev users on this crtc */
+       struct drm_mode_set mode_set;
+
+       /* GEM object that holds our cursor */
+       struct drm_gem_object *cursor_obj;
+
+       struct drm_display_mode saved_mode;
+       struct drm_display_mode saved_adjusted_mode;
+
+       struct psb_intel_mode_device *mode_dev;
+
+       /*crtc mode setting flags*/
+       u32 mode_flags;
+
+       /* Saved Crtc HW states */
+       struct psb_intel_crtc_state *crtc_state;
+};
+
+#define to_psb_intel_crtc(x)   \
+               container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_connector(x) \
+               container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x)        \
+               container_of(x, struct psb_intel_encoder, base)
+#define to_psb_intel_framebuffer(x)    \
+               container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+                                       const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+                           struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+                           struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+                           struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+                          struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+                          struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+/* Return the encoder cached on the wrapped connector */
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+                                               struct drm_connector *connector)
+{
+       struct psb_intel_connector *c = to_psb_intel_connector(connector);
+
+       return c->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+                                       struct psb_intel_connector *connector,
+                                       struct psb_intel_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+                                             *connector);
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+                                                   struct drm_crtc *crtc);
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+                                                int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+                                            int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+                                  int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+                         struct drm_framebuffer *fb);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+                                                       *dev, struct
+                                                       drm_mode_fb_cmd
+                                                       *mode_cmd,
+                                                       void *mm_private);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                     struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                    struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+                                       struct drm_property *property,
+                                       uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+#endif                         /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644 (file)
index 0000000..a25e4ca
--- /dev/null
@@ -0,0 +1,868 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE   0x01
+#define BLC_PWM_TYPT   0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ       (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR   (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+/*
+ * Per-encoder private state for the LVDS output: register values captured
+ * by psb_intel_lvds_save() for later restore, plus the two I2C channels
+ * created by psb_intel_lvds_init().
+ */
+struct psb_intel_lvds_priv {
+       /*
+        * Saved LVDO output states
+        */
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t saveLVDS;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePFIT_PGM_RATIOS;
+       uint32_t saveBLC_PWM_CTL;
+
+       struct psb_intel_i2c_chan *i2c_bus;     /* backlight controller (GPIOB, slave 0x2C) */
+       struct psb_intel_i2c_chan *ddc_bus;     /* EDID/DDC (GPIOC) */
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ *
+ * Reads BLC_PWM_CTL when the device is powered (falling back to the saved
+ * copy otherwise), extracts the modulation-frequency field and doubles it
+ * to produce the full duty-cycle range usable for setting the backlight.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 ret;
+
+       if (gma_power_begin(dev, false)) {
+               ret = REG_READ(BLC_PWM_CTL);
+               gma_power_end(dev);
+       } else /* Powered off, use the saved value */
+               ret = dev_priv->saveBLC_PWM_CTL;
+
+       /* Top 15bits hold the frequency mask */
+       ret = (ret &  BACKLIGHT_MODULATION_FREQ_MASK) >>
+                                       BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+        ret *= 2;      /* Return a 16bit range as needed for setting */
+        if (ret == 0)
+                dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+                        REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+       return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * Scales @level (0..BRIGHTNESS_MAX_LEVEL) into the 8-bit register range,
+ * honours inverted polarity from the VBT backlight data, and sends the
+ * brightness command over the dedicated backlight I2C bus.
+ *
+ * Returns 0 on success, -1 if the I2C transfer fails.
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (ie blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+                                       unsigned int level)
+{
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *)dev->dev_private;
+
+       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+       u8 out_buf[2];
+       unsigned int blc_i2c_brightness;
+
+       /* Single write message: command byte followed by the level byte */
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = lvds_i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
+       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+                            BRIGHTNESS_MASK /
+                            BRIGHTNESS_MAX_LEVEL);
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+       out_buf[1] = (u8)blc_i2c_brightness;
+
+       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+               dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+                       dev_priv->lvds_bl->brightnesscmd,
+                       blc_i2c_brightness);
+               return 0;
+       }
+
+       dev_err(dev->dev, "I2C transfer error\n");
+       return -1;
+}
+
+
+/*
+ * Set LVDS backlight level via the PWM duty cycle.
+ *
+ * Scales @level (0..BRIGHTNESS_MAX_LEVEL) against the maximum PWM value,
+ * honours inverted polarity, and writes BLC_PWM_CTL with the modulation
+ * frequency in the upper half and the duty cycle in the lower half.
+ */
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private;
+
+       u32 max_pwm_blc;
+       u32 blc_pwm_duty_cycle;
+
+       max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+       /*BLC_PWM_CTL Should be initiated while backlight device init*/
+       BUG_ON(max_pwm_blc == 0);
+
+       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+       /* Clear the polarity bit in the duty-cycle field before writing */
+       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+       REG_WRITE(BLC_PWM_CTL,
+                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                 (blc_pwm_duty_cycle));
+
+        dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                 (blc_pwm_duty_cycle));
+
+       return 0;
+}
+
+/*
+ * Set the LVDS backlight level, routing the request to either the I2C
+ * or the PWM control path according to the backlight type recorded in
+ * the VBT backlight data.
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "NO LVDS backlight info\n");
+               return;
+       }
+
+       if (dev_priv->lvds_bl->type != BLC_I2C_TYPE)
+               psb_lvds_pwm_set_brightness(dev, level);
+       else
+               psb_lvds_i2c_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ *
+ * When the device is powered, the duty-cycle field of BLC_PWM_CTL is
+ * rewritten directly; in both cases the result is mirrored into
+ * saveBLC_PWM_CTL so it can be reapplied after a power transition.
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 blc_pwm_ctl;
+
+       if (gma_power_begin(dev, false)) {
+               blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+               blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+               REG_WRITE(BLC_PWM_CTL,
+                               (blc_pwm_ctl |
+                               (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+               gma_power_end(dev);
+       } else {
+               /* Powered off: only update the saved copy for later restore */
+               blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+                               ~BACKLIGHT_DUTY_CYCLE_MASK;
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+       }
+}
+
+/*
+ * Sets the power state for the panel.
+ *
+ * Toggles POWER_TARGET_ON in PP_CONTROL and busy-waits on PP_STATUS until
+ * the panel reports the matching state; restores the saved backlight duty
+ * cycle after power-up and blanks the backlight before power-down.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       u32 pp_status;
+
+       if (!gma_power_begin(dev, true)) {
+               dev_err(dev->dev, "set power, chip off!\n");
+               return;
+        }
+        
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               psb_intel_lvds_set_backlight(dev,
+                                            mode_dev->backlight_duty_cycle);
+       } else {
+               psb_intel_lvds_set_backlight(dev, 0);
+
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+
+       gma_power_end(dev);
+}
+
+/*
+ * DPMS hook: any state other than ON (standby/suspend/off) powers the
+ * panel down.
+ */
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+
+       psb_intel_lvds_set_power(dev, mode == DRM_MODE_DPMS_ON);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+/*
+ * Capture the LVDS panel-power, backlight and panel-fitter register state
+ * into the per-encoder private area so psb_intel_lvds_restore() can
+ * reapply it after a power cycle.
+ */
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *)dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv =
+               (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+       lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+       lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+       lvds_priv->saveLVDS = REG_READ(LVDS);
+       lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+       lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+       /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+       lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+       lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+       /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                               BACKLIGHT_DUTY_CYCLE_MASK);
+
+       /*
+        * If the light is off at server startup,
+        * just make it full brightness
+        */
+       if (dev_priv->backlight_duty_cycle == 0)
+               dev_priv->backlight_duty_cycle =
+               psb_intel_lvds_get_max_backlight(dev);
+
+       dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+                       lvds_priv->savePP_ON,
+                       lvds_priv->savePP_OFF,
+                       lvds_priv->saveLVDS,
+                       lvds_priv->savePP_CONTROL,
+                       lvds_priv->savePP_CYCLE,
+                       lvds_priv->saveBLC_PWM_CTL);
+}
+
+/*
+ * Reprogram the registers captured by psb_intel_lvds_save() and rerun the
+ * panel power sequence to whatever on/off state was saved in PP_CONTROL.
+ */
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       u32 pp_status;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv =
+               (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+       dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+                       lvds_priv->savePP_ON,
+                       lvds_priv->savePP_OFF,
+                       lvds_priv->saveLVDS,
+                       lvds_priv->savePP_CONTROL,
+                       lvds_priv->savePP_CYCLE,
+                       lvds_priv->saveBLC_PWM_CTL);
+
+       REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+       REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+       REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+       REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+       REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+       /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+       REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+       REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+       REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+       /* Re-run the power sequence and busy-wait for PP_STATUS to match */
+       if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                       POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+       } else {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                       ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+}
+
+/*
+ * Validate a requested mode against the fixed panel timings.  Doublescan
+ * and interlace are rejected outright; anything larger than the panel's
+ * native resolution is rejected as MODE_PANEL.
+ */
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct drm_display_mode *fixed_mode;
+
+       /* MIPI2 outputs validate against the second fixed panel mode */
+       fixed_mode = (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+                       ? dev_priv->mode_dev.panel_fixed_mode2
+                       : dev_priv->mode_dev.panel_fixed_mode;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       if (fixed_mode && (mode->hdisplay > fixed_mode->hdisplay ||
+                          mode->vdisplay > fixed_mode->vdisplay))
+               return MODE_PANEL;
+
+       return MODE_OK;
+}
+
+/*
+ * Validate the crtc/encoder topology for LVDS and substitute the fixed
+ * panel timings into @adjusted_mode so the CRTC is programmed for the
+ * panel's native mode while the panel fitter scales the original mode.
+ * Returns false if LVDS cannot run on this pipe or would share a pipe
+ * with another encoder.
+ */
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct psb_intel_crtc *psb_intel_crtc =
+                               to_psb_intel_crtc(encoder->crtc);
+       struct drm_encoder *tmp_encoder;
+       struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+
+       if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+               panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+       /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
+       if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+               printk(KERN_ERR "Can't support LVDS on pipe A\n");
+               return false;
+       }
+       if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+               printk(KERN_ERR "Must use PIPE A\n");
+               return false;
+       }
+       /* Should never happen!! */
+       list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+                           head) {
+               if (tmp_encoder != encoder
+                   && tmp_encoder->crtc == encoder->crtc) {
+                       printk(KERN_ERR "Can't enable LVDS and another "
+                              "encoder on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+               adjusted_mode->clock = panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode,
+                                     CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+/*
+ * Modeset prepare hook: save the current backlight level, then power the
+ * panel down for the duration of the mode switch (commit powers it back up).
+ */
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+
+       psb_intel_lvds_set_power(dev, false);
+
+       gma_power_end(dev);
+}
+
+/*
+ * Modeset commit hook: power the panel back up after a mode switch.
+ * A zero duty cycle would leave the panel dark, so fall back to full
+ * brightness first.
+ */
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!mode_dev->backlight_duty_cycle)
+               mode_dev->backlight_duty_cycle =
+                               psb_intel_lvds_get_max_backlight(dev);
+
+       psb_intel_lvds_set_power(dev, true);
+}
+
+/*
+ * Program the panel fitter for the new mode.
+ *
+ * The LVDS pin pair was already turned on in psb_intel_crtc_mode_set
+ * (it has a large impact on the DPLL settings), so only PFIT_CONTROL
+ * is written here.  Automatic panel scaling makes non-native modes fill
+ * the screen; per the register description and PRM it should be enabled
+ * before the pipe is enabled.
+ */
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pfit_control = 0;
+
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = PFIT_ENABLE | VERT_AUTO_SCALE |
+                              HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                              HORIZ_INTERP_BILINEAR;
+
+       if (dev_priv->lvds_dither)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * The connector is only created when a panel was actually found, so
+ * detection unconditionally reports connector_status_connected.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+                                                  *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+       int ret = 0;
+
+       /* Fetch the EDID over the DDC bus (GPIOC), the same channel init
+        * used for panel discovery - not the backlight controller bus */
+       if (!IS_MRST(dev))
+               ret = psb_intel_ddc_get_modes(connector,
+                                             &lvds_priv->ddc_bus->adapter);
+
+       if (ret)
+               return ret;
+
+       /* Didn't get an EDID, so
+        * Set wide sync ranges so we get all modes
+        * handed to valid_mode for checking
+        */
+       connector->display_info.min_vfreq = 0;
+       connector->display_info.max_vfreq = 200;
+       connector->display_info.min_hfreq = 0;
+       connector->display_info.max_hfreq = 200;
+
+       if (mode_dev->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode =
+                   drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+               drm_mode_probed_add(connector, mode);
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+
+       /* NOTE(review): lvds_priv and its i2c_bus are not released here;
+        * presumably freed elsewhere or leaked - confirm ownership. */
+       if (lvds_priv->ddc_bus)
+               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+/*
+ * Handle writes to the "scaling mode", "backlight" and "DPMS" connector
+ * properties.  Returns 0 on success.
+ *
+ * NOTE(review): failure paths return -1 rather than a -Exxx errno; the
+ * value propagates out of the property ioctl, so confirm before changing.
+ */
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!encoder)
+               return -1;
+
+       if (!strcmp(property->name, "scaling mode")) {
+               struct psb_intel_crtc *crtc =
+                                       to_psb_intel_crtc(encoder->crtc);
+               uint64_t curval;
+
+               if (!crtc)
+                       goto set_prop_error;
+
+               /* Only the three supported scaling modes are accepted */
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+                       break;
+               case DRM_MODE_SCALE_NO_SCALE:
+                       break;
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       goto set_prop_error;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                    property,
+                                                    &curval))
+                       goto set_prop_error;
+
+               /* Unchanged value: nothing to do */
+               if (curval == value)
+                       goto set_prop_done;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       goto set_prop_error;
+
+               /* Re-run the last modeset so the new scaling takes effect */
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (!drm_crtc_helper_set_mode(encoder->crtc,
+                                                     &crtc->saved_mode,
+                                                     encoder->crtc->x,
+                                                     encoder->crtc->y,
+                                                     encoder->crtc->fb))
+                               goto set_prop_error;
+               }
+       } else if (!strcmp(property->name, "backlight")) {
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       goto set_prop_error;
+               else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+                       /* Forward the new level to the backlight class device */
+                       struct drm_psb_private *devp =
+                                               encoder->dev->dev_private;
+                       struct backlight_device *bd = devp->backlight_device;
+                       if (bd) {
+                               bd->props.brightness = value;
+                               backlight_update_status(bd);
+                       }
+#endif
+               }
+       } else if (!strcmp(property->name, "DPMS")) {
+               struct drm_encoder_helper_funcs *hfuncs
+                                               = encoder->helper_private;
+               hfuncs->dpms(encoder, value);
+       }
+
+set_prop_done:
+       return 0;
+set_prop_error:
+       return -1;
+}
+
+/* Encoder helpers: DPMS plus the prepare/mode_set/commit modeset sequence */
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+       .dpms = psb_intel_lvds_encoder_dpms,
+       .mode_fixup = psb_intel_lvds_mode_fixup,
+       .prepare = psb_intel_lvds_prepare,
+       .mode_set = psb_intel_lvds_mode_set,
+       .commit = psb_intel_lvds_commit,
+};
+
+/* Connector helpers: mode probing, validation and encoder lookup */
+const struct drm_connector_helper_funcs
+                               psb_intel_lvds_connector_helper_funcs = {
+       .get_modes = psb_intel_lvds_get_modes,
+       .mode_valid = psb_intel_lvds_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+/* Connector entry points exposed to the DRM core */
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = psb_intel_lvds_save,
+       .restore = psb_intel_lvds_restore,
+       .detect = psb_intel_lvds_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = psb_intel_lvds_set_property,
+       .destroy = psb_intel_lvds_destroy,
+};
+
+
+/* Encoder destroy callback: only the DRM core state needs cleaning up */
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+/* Encoder vtable (also referenced from the driver header) */
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+       .destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: per-device panel mode bookkeeping
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+                        struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct psb_intel_lvds_priv *lvds_priv;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_display_mode *scan;  /* *modes, *bios_mode; */
+       struct drm_crtc *crtc;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 lvds;
+       int pipe;
+
+       psb_intel_encoder =
+                       kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder) {
+               dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+               return;
+       }
+
+       psb_intel_connector =
+               kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector) {
+               dev_err(dev->dev, "psb_intel_connector allocation error\n");
+               /* Previously fell through and dereferenced the NULL
+                * connector below; bail out instead */
+               goto failed_encoder;
+       }
+
+       lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+       if (!lvds_priv) {
+               dev_err(dev->dev, "LVDS private allocation error\n");
+               goto failed_connector;
+       }
+
+       psb_intel_encoder->dev_priv = lvds_priv;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &psb_intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, encoder,
+                        &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &psb_intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       /*Attach connector properties*/
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.scaling_mode_property,
+                                     DRM_MODE_SCALE_FULLSCREEN);
+       drm_connector_attach_property(connector,
+                                     dev_priv->backlight_property,
+                                     BRIGHTNESS_MAX_LEVEL);
+
+       /*
+        * Set up the backlight controller I2C bus
+        * FIXME: destroy i2c_bus when exit
+        */
+       lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+       if (!lvds_priv->i2c_bus) {
+               dev_printk(KERN_ERR,
+                       &dev->pdev->dev, "I2C bus registration failed.\n");
+               goto failed_blc_i2c;
+       }
+       lvds_priv->i2c_bus->slave_addr = 0x2C;
+       dev_priv->lvds_i2c_bus =  lvds_priv->i2c_bus;
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       /* Set up the DDC bus. */
+       lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+       if (!lvds_priv->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev,
+                          "DDC bus registration " "failed.\n");
+               goto failed_ddc;
+       }
+
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                       mode_dev->panel_fixed_mode =
+                           drm_mode_duplicate(dev, scan);
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* Failed to get EDID, what about VBT? do we need this? */
+       if (mode_dev->vbt_mode)
+               mode_dev->panel_fixed_mode =
+                   drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+       if (!mode_dev->panel_fixed_mode)
+               if (dev_priv->lfp_lvds_vbt_mode)
+                       mode_dev->panel_fixed_mode =
+                               drm_mode_duplicate(dev,
+                                       dev_priv->lfp_lvds_vbt_mode);
+
+       /*
+        * If we didn't get EDID, try checking if the panel is already turned
+        * on.  If so, assume that whatever is currently programmed is the
+        * correct mode.
+        */
+       lvds = REG_READ(LVDS);
+       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+       crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+       if (crtc && (lvds & LVDS_PORT_EN)) {
+               mode_dev->panel_fixed_mode =
+                   psb_intel_crtc_mode_get(dev, crtc);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                           DRM_MODE_TYPE_PREFERRED;
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!mode_dev->panel_fixed_mode) {
+               dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+               goto failed_find;
+       }
+
+       /*
+        * Blacklist machines with BIOSes that list an LVDS panel without
+        * actually having one.
+        */
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_find:
+       if (lvds_priv->ddc_bus)
+               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+       if (lvds_priv->i2c_bus)
+               psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+       kfree(lvds_priv);       /* was leaked on these error paths */
+failed_connector:
+       kfree(psb_intel_connector);
+failed_encoder:
+       kfree(psb_intel_encoder);       /* was leaked when lvds_priv alloc failed */
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644 (file)
index 0000000..4fca0d6
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe
+ *
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+       u8 out_buf[] = { 0x0, 0x0 };
+       u8 buf[2];
+       int ret;
+       struct i2c_msg msgs[] = {
+               {
+                .addr = 0x50,
+                .flags = 0,
+                .len = 1,
+                .buf = out_buf,
+                },
+               {
+                .addr = 0x50,
+                .flags = I2C_M_RD,
+                .len = 1,
+                .buf = buf,
+                }
+       };
+
+       ret = i2c_transfer(adapter, msgs, 2);
+       if (ret == 2)
+               return true;
+
+       return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+                           struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+       int ret = 0;
+
+       edid = drm_get_edid(connector, adapter);
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       }
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644 (file)
index 0000000..fcc0af0
--- /dev/null
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+# define GPIO_CLOCK_DIR_MASK           (1 << 0)
+# define GPIO_CLOCK_DIR_IN             (0 << 1)
+# define GPIO_CLOCK_DIR_OUT            (1 << 1)
+# define GPIO_CLOCK_VAL_MASK           (1 << 2)
+# define GPIO_CLOCK_VAL_OUT            (1 << 3)
+# define GPIO_CLOCK_VAL_IN             (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE     (1 << 5)
+# define GPIO_DATA_DIR_MASK            (1 << 8)
+# define GPIO_DATA_DIR_IN              (0 << 9)
+# define GPIO_DATA_DIR_OUT             (1 << 9)
+# define GPIO_DATA_VAL_MASK            (1 << 10)
+# define GPIO_DATA_VAL_OUT             (1 << 11)
+# define GPIO_DATA_VAL_IN              (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
+
+#define GMBUS0                 0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ    (0<<8)
+#define   GMBUS_RATE_50KHZ     (1<<8)
+#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED  0
+#define   GMBUS_PORT_SSC       1
+#define   GMBUS_PORT_VGADDC    2
+#define   GMBUS_PORT_PANEL     3
+#define   GMBUS_PORT_DPC       4 /* HDMIC */
+#define   GMBUS_PORT_DPB       5 /* SDVO, HDMIB */
+                                 /* 6 reserved */
+#define   GMBUS_PORT_DPD       7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1                 0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT     (1<<31)
+#define   GMBUS_SW_RDY         (1<<30)
+#define   GMBUS_ENT            (1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0<<25)
+#define   GMBUS_CYCLE_WAIT     (1<<25)
+#define   GMBUS_CYCLE_INDEX    (2<<25)
+#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ     (1<<0)
+#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define GMBUS2                 0x5108 /* status */
+#define   GMBUS_INUSE          (1<<15)
+#define   GMBUS_HW_WAIT_PHASE  (1<<14)
+#define   GMBUS_STALL_TIMEOUT  (1<<13)
+#define   GMBUS_INT            (1<<12)
+#define   GMBUS_HW_RDY         (1<<11)
+#define   GMBUS_SATOER         (1<<10)
+#define   GMBUS_ACTIVE         (1<<9)
+#define GMBUS3                 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4                 0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN         (1<<3)
+#define   GMBUS_IDLE_EN                (1<<2)
+#define   GMBUS_HW_WAIT_EN     (1<<1)
+#define   GMBUS_HW_RDY_EN      (1<<0)
+#define GMBUS5                 0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN (1<<31)
+
+#define BLC_PWM_CTL            0x61254
+#define BLC_PWM_CTL2           0x61250
+#define BLC_PWM_CTL_C          0x62254
+#define BLC_PWM_CTL2_C         0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT                (17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE                        (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT     (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK      (0xffff)
+
+#define I915_GCFGC                     0xf0
+#define I915_LOW_FREQUENCY_ENABLE      (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ     (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK                (7 << 4)
+
+#define I855_HPLLCC                    0xc0
+#define I855_CLOCK_CONTROL_MASK                (3 << 0)
+#define I855_CLOCK_133_200             (0 << 0)
+#define I855_CLOCK_100_200             (1 << 0)
+#define I855_CLOCK_100_133             (2 << 0)
+#define I855_CLOCK_166_250             (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A               0x60000
+#define HBLANK_A               0x60004
+#define HSYNC_A                        0x60008
+#define VTOTAL_A               0x6000c
+#define VBLANK_A               0x60010
+#define VSYNC_A                        0x60014
+#define PIPEASRC               0x6001c
+#define BCLRPAT_A              0x60020
+#define VSYNCSHIFT_A           0x60028
+
+#define HTOTAL_B               0x61000
+#define HBLANK_B               0x61004
+#define HSYNC_B                        0x61008
+#define VTOTAL_B               0x6100c
+#define VBLANK_B               0x61010
+#define VSYNC_B                        0x61014
+#define PIPEBSRC               0x6101c
+#define BCLRPAT_B              0x61020
+#define VSYNCSHIFT_B           0x61028
+
+#define HTOTAL_C               0x62000
+#define HBLANK_C               0x62004
+#define HSYNC_C                        0x62008
+#define VTOTAL_C               0x6200c
+#define VBLANK_C               0x62010
+#define VSYNC_C                        0x62014
+#define PIPECSRC               0x6201c
+#define BCLRPAT_C              0x62020
+#define VSYNCSHIFT_C           0x62028
+
+#define PP_STATUS              0x61200
+# define PP_ON                         (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY                       (1 << 30)
+#define PP_SEQUENCE_NONE               (0 << 28)
+#define PP_SEQUENCE_ON                 (1 << 28)
+#define PP_SEQUENCE_OFF                        (2 << 28)
+#define PP_SEQUENCE_MASK               0x30000000
+#define PP_CONTROL             0x61204
+#define POWER_TARGET_ON                        (1 << 0)
+
+#define LVDSPP_ON              0x61208
+#define LVDSPP_OFF             0x6120c
+#define PP_CYCLE               0x61210
+
+#define PFIT_CONTROL           0x61230
+#define PFIT_ENABLE                    (1 << 31)
+#define PFIT_PIPE_MASK                 (3 << 29)
+#define PFIT_PIPE_SHIFT                        29
+#define PFIT_SCALING_MODE_PILLARBOX    (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX    (3 << 26)
+#define VERT_INTERP_DISABLE            (0 << 10)
+#define VERT_INTERP_BILINEAR           (1 << 10)
+#define VERT_INTERP_MASK               (3 << 10)
+#define VERT_AUTO_SCALE                        (1 << 9)
+#define HORIZ_INTERP_DISABLE           (0 << 6)
+#define HORIZ_INTERP_BILINEAR          (1 << 6)
+#define HORIZ_INTERP_MASK              (3 << 6)
+#define HORIZ_AUTO_SCALE               (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE       (1 << 3)
+
+#define PFIT_PGM_RATIOS                0x61234
+#define PFIT_VERT_SCALE_MASK                   0xfff00000
+#define PFIT_HORIZ_SCALE_MASK                  0x0000fff0
+
+#define PFIT_AUTO_RATIOS       0x61238
+
+#define DPLL_A                 0x06014
+#define DPLL_B                 0x06018
+#define DPLL_VCO_ENABLE                        (1 << 31)
+#define DPLL_DVO_HIGH_SPEED            (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE           (1 << 29)
+#define DPLL_VGA_MODE_DIS              (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL          (1 << 26)       /* i915 */
+#define DPLLB_MODE_LVDS                        (2 << 26)       /* i915 */
+#define DPLL_MODE_MASK                 (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10        (0 << 24)       /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24)       /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14     (0 << 24)       /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7      (1 << 24)       /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK         0x03000000      /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK    0x00ff0000      /* i915 */
+#define DPLL_LOCK                      (1 << 15)       /* CDV */
+
+/*
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830      0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS  0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT   16
+#define PLL_P2_DIVIDE_BY_4             (1 << 23)       /* i830, required
+                                                        * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO          (1 << 21)       /* i830 */
+#define PLL_REF_INPUT_DREFCLK          (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA         (1 << 13)       /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC                (2 << 13)       /* SDVO
+                                                                * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN        (3 << 13)
+#define PLL_REF_INPUT_MASK             (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT     9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK      (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1       (1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK           0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES    4
+#define SDVO_MULTIPLIER_SHIFT_VGA      0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD              0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD              0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK       0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT      24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK   0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT  16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+ * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK    0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT   8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK        0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST              0x606c
+#define DPLLB_TEST_SDVO_DIV_1          (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2          (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4          (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK       (3 << 22)
+#define DPLLB_TEST_N_BYPASS            (1 << 19)
+#define DPLLB_TEST_M_BYPASS            (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE      (1 << 16)
+#define DPLLA_TEST_N_BYPASS            (1 << 3)
+#define DPLLA_TEST_M_BYPASS            (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE      (1 << 0)
+
+#define ADPA                   0x61100
+#define ADPA_DAC_ENABLE                        (1 << 31)
+#define ADPA_DAC_DISABLE               0
+#define ADPA_PIPE_SELECT_MASK          (1 << 30)
+#define ADPA_PIPE_A_SELECT             0
+#define ADPA_PIPE_B_SELECT             (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY                (1 << 15)
+#define ADPA_SETS_HVPOLARITY           0
+#define ADPA_VSYNC_CNTL_DISABLE                (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE         0
+#define ADPA_HSYNC_CNTL_DISABLE                (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE         0
+#define ADPA_VSYNC_ACTIVE_HIGH         (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW          0
+#define ADPA_HSYNC_ACTIVE_HIGH         (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW          0
+
+#define FPA0                   0x06040
+#define FPA1                   0x06044
+#define FPB0                   0x06048
+#define FPB1                   0x0604c
+#define FP_N_DIV_MASK                  0x003f0000
+#define FP_N_DIV_SHIFT                 16
+#define FP_M1_DIV_MASK                 0x00003f00
+#define FP_M1_DIV_SHIFT                        8
+#define FP_M2_DIV_MASK                 0x0000003f
+#define FP_M2_DIV_SHIFT                        0
+
+#define PORT_HOTPLUG_EN                0x61110
+#define SDVOB_HOTPLUG_INT_EN           (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN           (1 << 25)
+#define TV_HOTPLUG_INT_EN              (1 << 18)
+#define CRT_HOTPLUG_INT_EN             (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT       (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64       (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M             (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M             (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40         (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50         (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60         (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70         (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK       (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G            (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G            (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK                        0x000000F8
+
+#define PORT_HOTPLUG_STAT      0x61114
+#define CRT_HOTPLUG_INT_STATUS         (1 << 11)
+#define TV_HOTPLUG_INT_STATUS          (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK       (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR      (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO       (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE       (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS       (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS       (1 << 6)
+
+#define SDVOB                  0x61140
+#define SDVOC                  0x61160
+#define SDVO_ENABLE                    (1 << 31)
+#define SDVO_PIPE_B_SELECT             (1 << 30)
+#define SDVO_STALL_SELECT              (1 << 29)
+#define SDVO_INTERRUPT_ENABLE          (1 << 26)
+#define SDVO_COLOR_RANGE_16_235                (1 << 8)
+#define SDVO_AUDIO_ENABLE              (1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK                (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT       23
+#define SDVO_PHASE_SELECT_MASK         (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT      (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT       (1 << 18)
+#define SDVOC_GANG_MODE                        (1 << 16)
+#define SDVO_BORDER_ENABLE             (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY         (1 << 3)
+#define SDVO_DETECTED                  (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK            ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK            (1 << 17)
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS                   0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN                   (1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT              (1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN                 (1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK      (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN      (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP                (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK             (3 << 6)
+#define LVDS_A3_POWER_DOWN             (0 << 6)
+#define LVDS_A3_POWER_UP               (3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK           (3 << 4)
+#define LVDS_CLKB_POWER_DOWN           (0 << 4)
+#define LVDS_CLKB_POWER_UP             (3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK           (3 << 2)
+#define LVDS_B0B3_POWER_DOWN           (0 << 2)
+#define LVDS_B0B3_POWER_UP             (3 << 2)
+
+#define PIPEACONF              0x70008
+#define PIPEACONF_ENABLE               (1 << 31)
+#define PIPEACONF_DISABLE              0
+#define PIPEACONF_DOUBLE_WIDE          (1 << 30)
+#define PIPECONF_ACTIVE                        (1 << 30)
+#define I965_PIPECONF_ACTIVE           (1 << 30)
+#define PIPECONF_DSIPLL_LOCK           (1 << 29)
+#define PIPEACONF_SINGLE_WIDE          0
+#define PIPEACONF_PIPE_UNLOCKED                0
+#define PIPEACONF_DSR                  (1 << 26)
+#define PIPEACONF_PIPE_LOCKED          (1 << 25)
+#define PIPEACONF_PALETTE              0
+#define PIPECONF_FORCE_BORDER          (1 << 25)
+#define PIPEACONF_GAMMA                        (1 << 24)
+#define PIPECONF_PROGRESSIVE           (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION  (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY                (7 << 21)
+#define PIPECONF_PLANE_OFF             (1 << 19)
+#define PIPECONF_CURSOR_OFF            (1 << 18)
+
+#define PIPEBCONF              0x71008
+#define PIPEBCONF_ENABLE               (1 << 31)
+#define PIPEBCONF_DISABLE              0
+#define PIPEBCONF_DOUBLE_WIDE          (1 << 30)
+#define PIPEBCONF_DISABLE              0
+#define PIPEBCONF_GAMMA                        (1 << 24)
+#define PIPEBCONF_PALETTE              0
+
+#define PIPECCONF              0x72008
+
+#define PIPEBGCMAXRED          0x71010
+#define PIPEBGCMAXGREEN                0x71014
+#define PIPEBGCMAXBLUE         0x71018
+
+#define PIPEASTAT              0x70024
+#define PIPEBSTAT              0x71024
+#define PIPECSTAT              0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS           (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS     (1UL << 2)
+#define PIPE_VBLANK_CLEAR                      (1 << 1)
+#define PIPE_VBLANK_STATUS                     (1 << 1)
+#define PIPE_TE_STATUS                         (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS                 (1UL << 7)
+#define PIPE_VSYNC_CLEAR                       (1UL << 9)
+#define PIPE_VSYNC_STATUS                      (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS                (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS     (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE           (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE     (1UL << 18)
+#define PIPE_TE_ENABLE                         (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE                 (1UL << 23)
+#define PIPE_VSYNC_ENABL                       (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN               (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE            (1UL << 27)
+#define PIPE_HDMI_AUDIO_INT_MASK               (PIPE_HDMI_AUDIO_UNDERRUN | \
+                                               PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
+#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+#define HISTOGRAM_INT_CONTROL          0x61268
+#define HISTOGRAM_BIN_DATA             0X61264
+#define HISTOGRAM_LOGIC_CONTROL                0x61260
+#define PWM_CONTROL_LOGIC              0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS          (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE             (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE                 (1UL << 31)
+#define PWM_LOGIC_ENABLE                       (1UL << 31)
+#define PWM_PHASEIN_ENABLE                     (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE                 (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT                   0x00001f00
+#define PWM_PHASEIN_INC                                0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR               (1UL << 30)
+#define DPST_YUV_LUMA_MODE                     0
+
+struct dpst_ie_histogram_control {
+       union {
+               uint32_t data;
+               struct {
+                       uint32_t bin_reg_index:7;
+                       uint32_t reserved:4;
+                       uint32_t bin_reg_func_select:1;
+                       uint32_t sync_to_phase_in:1;
+                       uint32_t alt_enhancement_mode:2;
+                       uint32_t reserved1:1;
+                       uint32_t sync_to_phase_in_count:8;
+                       uint32_t histogram_mode_select:1;
+                       uint32_t reserved2:4;
+                       uint32_t ie_pipe_assignment:1;
+                       uint32_t ie_mode_table_enabled:1;
+                       uint32_t ie_histogram_enable:1;
+               };
+       };
+};
+
+struct dpst_guardband {
+       union {
+               uint32_t data;
+               struct {
+                       uint32_t guardband:22;
+                       uint32_t guardband_interrupt_delay:8;
+                       uint32_t interrupt_status:1;
+                       uint32_t interrupt_enable:1;
+               };
+       };
+};
+
+#define PIPEAFRAMEHIGH         0x70040
+#define PIPEAFRAMEPIXEL                0x70044
+#define PIPEBFRAMEHIGH         0x71040
+#define PIPEBFRAMEPIXEL                0x71044
+#define PIPECFRAMEHIGH         0x72040
+#define PIPECFRAMEPIXEL                0x72044
+#define PIPE_FRAME_HIGH_MASK   0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT  0
+#define PIPE_FRAME_LOW_MASK    0xff000000
+#define PIPE_FRAME_LOW_SHIFT   24
+#define PIPE_PIXEL_MASK                0x00ffffff
+#define PIPE_PIXEL_SHIFT       0
+
+#define DSPARB                 0x70030
+#define DSPFW1                 0x70034
+#define DSPFW2                 0x70038
+#define DSPFW3                 0x7003c
+#define DSPFW4                 0x70050
+#define DSPFW5                 0x70054
+#define DSPFW6                 0x70058
+#define DSPCHICKENBIT          0x70400
+#define DSPACNTR               0x70180
+#define DSPBCNTR               0x71180
+#define DSPCCNTR               0x72180
+#define DISPLAY_PLANE_ENABLE                   (1 << 31)
+#define DISPLAY_PLANE_DISABLE                  0
+#define DISPPLANE_GAMMA_ENABLE                 (1 << 30)
+#define DISPPLANE_GAMMA_DISABLE                        0
+#define DISPPLANE_PIXFORMAT_MASK               (0xf << 26)
+#define DISPPLANE_8BPP                         (0x2 << 26)
+#define DISPPLANE_15_16BPP                     (0x4 << 26)
+#define DISPPLANE_16BPP                                (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA               (0x6 << 26)
+#define DISPPLANE_32BPP                                (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE                        (1 << 25)
+#define DISPPLANE_STEREO_DISABLE               0
+#define DISPPLANE_SEL_PIPE_MASK                        (1 << 24)
+#define DISPPLANE_SEL_PIPE_POS                 24
+#define DISPPLANE_SEL_PIPE_A                   0
+#define DISPPLANE_SEL_PIPE_B                   (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE               (1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE              0
+#define DISPPLANE_LINE_DOUBLE                  (1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE               0
+#define DISPPLANE_STEREO_POLARITY_FIRST                0
+#define DISPPLANE_STEREO_POLARITY_SECOND       (1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE           (1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE          0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA                0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY         (1)
+#define DISPPLANE_BOTTOM                       (4)
+
+#define DSPABASE               0x70184
+#define DSPALINOFF             0x70184
+#define DSPASTRIDE             0x70188
+
+#define DSPBBASE               0x71184
+#define DSPBLINOFF             0X71184
+#define DSPBADDR               DSPBBASE
+#define DSPBSTRIDE             0x71188
+
+#define DSPCBASE               0x72184
+#define DSPCLINOFF             0x72184
+#define DSPCSTRIDE             0x72188
+
+#define DSPAKEYVAL             0x70194
+#define DSPAKEYMASK            0x70198
+
+#define DSPAPOS                        0x7018C /* reserved */
+#define DSPASIZE               0x70190
+#define DSPBPOS                        0x7118C
+#define DSPBSIZE               0x71190
+#define DSPCPOS                        0x7218C
+#define DSPCSIZE               0x72190
+
+#define DSPASURF               0x7019C
+#define DSPATILEOFF            0x701A4
+
+#define DSPBSURF               0x7119C
+#define DSPBTILEOFF            0x711A4
+
+#define DSPCSURF               0x7219C
+#define DSPCTILEOFF            0x721A4
+#define DSPCKEYMAXVAL          0x721A0
+#define DSPCKEYMINVAL          0x72194
+#define DSPCKEYMSK             0x72198
+
+#define VGACNTRL               0x71400
+#define VGA_DISP_DISABLE               (1 << 31)
+#define VGA_2X_MODE                    (1 << 30)
+#define VGA_PIPE_B_SELECT              (1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET            0x08000
+#define OV_OVADD               0x30000
+#define OV_DOVASTA             0x30008
+# define OV_PIPE_SELECT                        ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS            6
+# define OV_PIPE_A                     0
+# define OV_PIPE_C                     1
+#define OV_OGAMC5              0x30010
+#define OV_OGAMC4              0x30014
+#define OV_OGAMC3              0x30018
+#define OV_OGAMC2              0x3001C
+#define OV_OGAMC1              0x30020
+#define OV_OGAMC0              0x30024
+#define OVC_OVADD              0x38000
+#define OVC_DOVCSTA            0x38008
+#define OVC_OGAMC5             0x38010
+#define OVC_OGAMC4             0x38014
+#define OVC_OGAMC3             0x38018
+#define OVC_OGAMC2             0x3801C
+#define OVC_OGAMC1             0x38020
+#define OVC_OGAMC0             0x38024
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0                   0x71410
+#define SWF1                   0x71414
+#define SWF2                   0x71418
+#define SWF3                   0x7141c
+#define SWF4                   0x71420
+#define SWF5                   0x71424
+#define SWF6                   0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00                  0x70410
+#define SWF01                  0x70414
+#define SWF02                  0x70418
+#define SWF03                  0x7041c
+#define SWF04                  0x70420
+#define SWF05                  0x70424
+#define SWF06                  0x70428
+
+#define SWF10                  SWF0
+#define SWF11                  SWF1
+#define SWF12                  SWF2
+#define SWF13                  SWF3
+#define SWF14                  SWF4
+#define SWF15                  SWF5
+#define SWF16                  SWF6
+
+#define SWF30                  0x72414
+#define SWF31                  0x72418
+#define SWF32                  0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A              0x0a000
+#define PALETTE_B              0x0a800
+#define PALETTE_C              0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR               0x70080
+#define CURSOR_MODE_DISABLE            0x00
+#define CURSOR_MODE_64_32B_AX          0x07
+#define CURSOR_MODE_64_ARGB_AX         ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE           (1 << 26)
+#define CURABASE               0x70084
+#define CURAPOS                        0x70088
+#define CURSOR_POS_MASK                        0x007FF
+#define CURSOR_POS_SIGN                        0x8000
+#define CURSOR_X_SHIFT                 0
+#define CURSOR_Y_SHIFT                 16
+#define CURBCNTR               0x700c0
+#define CURBBASE               0x700c4
+#define CURBPOS                        0x700c8
+#define CURCCNTR               0x700e0
+#define CURCBASE               0x700e4
+#define CURCPOS                        0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER                    0x020a0
+#define IIR                    0x020a4
+#define IMR                    0x020a8
+#define ISR                    0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A            0x0f014
+#define MDFLD_DPLL_B           0x0f018
+#define MDFLD_INPUT_REF_SEL            (1 << 14)
+#define MDFLD_VCO_SEL                  (1 << 16)
+#define DPLLA_MODE_LVDS                        (2 << 26)       /* mrst */
+#define MDFLD_PLL_LATCHEN              (1 << 28)
+#define MDFLD_PWR_GATE_EN              (1 << 30)
+#define MDFLD_P1_MASK                  (0x1FF << 17)
+#define MRST_FPA0              0x0f040
+#define MRST_FPA1              0x0f044
+#define MDFLD_DPLL_DIV0                0x0f048
+#define MDFLD_DPLL_DIV1                0x0f04c
+#define MRST_PERF_MODE         0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL         0x61134
+#define HDMI_PHY_POWER_DOWN            0x7f
+#define HDMIB_CONTROL          0x61140
+#define HDMIB_PORT_EN                  (1 << 31)
+#define HDMIB_PIPE_B_SELECT            (1 << 30)
+#define HDMIB_NULL_PACKET              (1 << 9)
+#define HDMIB_HDCP_PORT                        (1 << 5)
+
+/* #define LVDS                        0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE  (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT     (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT      (1 << 6)
+
+#define MIPI                   0x61190
+#define MIPI_C                 0x62190
+#define MIPI_PORT_EN                   (1 << 31)
+/* Turns on border drawing to allow centered display. */
+#define SEL_FLOPPED_HSTX               (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE          (1 << 16)
+#define MIPI_BORDER_EN                 (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE                0x1
+#define MIPIA_2LANE_MIPIC_2LANE                0x2
+#define TE_TRIGGER_DSI_PROTOCOL                (1 << 2)
+#define TE_TRIGGER_GPIO_PIN            (1 << 3)
+#define MIPI_TE_COUNT          0x61194
+
+/* #define PP_CONTROL  0x61204 */
+#define POWER_DOWN_ON_RESET            (1 << 1)
+
+/* #define PFIT_CONTROL        0x61230 */
+#define PFIT_PIPE_SELECT               (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT         (29)
+
+/* #define BLC_PWM_CTL         0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT   (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK    (0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE           (1 << 30)
+/* #define DSPACNTR            0x70180 */
+
+#define MRST_DSPABASE          0x7019c
+#define MRST_DSPBBASE          0x7119c
+#define MDFLD_DSPCBASE         0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ *     MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET               0x800
+
+#define DEVICE_READY_REG               0xb000
+#define LP_OUTPUT_HOLD                         (1 << 16)
+#define EXIT_ULPS_DEV_READY                    0x3
+#define LP_OUTPUT_HOLD_RELEASE                 0x810000
+# define ENTERING_ULPS                         (2 << 1)
+# define EXITING_ULPS                          (1 << 1)
+# define ULPS_MASK                             (3 << 1)
+# define BUS_POSSESSION                                (1 << 3)
+#define INTR_STAT_REG                  0xb004
+#define RX_SOT_ERROR                           (1 << 0)
+#define RX_SOT_SYNC_ERROR                      (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR             (1 << 3)
+#define RX_LP_TX_SYNC_ERROR                    (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR            (1 << 5)
+#define RX_FALSE_CONTROL_ERROR                 (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR                        (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR                 (1 << 8)
+#define RX_CHECKSUM_ERROR                      (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED                (1 << 10)
+#define RX_DSI_VC_ID_INVALID                   (1 << 11)
+#define TX_FALSE_CONTROL_ERROR                 (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR                        (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR                 (1 << 14)
+#define TX_CHECKSUM_ERROR                      (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED                (1 << 16)
+#define TX_DSI_VC_ID_INVALID                   (1 << 17)
+#define HIGH_CONTENTION                                (1 << 18)
+#define LOW_CONTENTION                         (1 << 19)
+#define DPI_FIFO_UNDER_RUN                     (1 << 20)
+#define HS_TX_TIMEOUT                          (1 << 21)
+#define LP_RX_TIMEOUT                          (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT                        (1 << 23)
+#define ACK_WITH_NO_ERROR                      (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL                        (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL                        (1 << 28)
+#define SPL_PKT_SENT                           (1 << 30)
+#define INTR_EN_REG                    0xb008
+#define DSI_FUNC_PRG_REG               0xb00c
+#define DPI_CHANNEL_NUMBER_POS                 0x03
+#define DBI_CHANNEL_NUMBER_POS                 0x05
+#define FMT_DPI_POS                            0x07
+#define FMT_DBI_POS                            0x0A
+#define DBI_DATA_WIDTH_POS                     0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT                            0x01    /* RGB 565 FORMAT */
+#define RGB_666_FMT                            0x02    /* RGB 666 FORMAT */
+#define LRGB_666_FMT                           0x03    /* RGB LOOSELY PACKED
+                                                        * 666 FORMAT
+                                                        */
+#define RGB_888_FMT                            0x04    /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0               0x00    /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1               0x01    /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2               0x02    /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3               0x03    /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED                      0x00    /* command mode
+                                                        * is not supported
+                                                        */
+#define DBI_DATA_WIDTH_16BIT                   0x01    /* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT                    0x02    /* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT                    0x03    /* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1                    0x04    /* option 1 */
+#define DBI_DATA_WIDTH_OPT2                    0x05    /* option 2 */
+
+#define HS_TX_TIMEOUT_REG              0xb010
+#define LP_RX_TIMEOUT_REG              0xb014
+#define TURN_AROUND_TIMEOUT_REG                0xb018
+#define DEVICE_RESET_REG               0xb01C
+#define DPI_RESOLUTION_REG             0xb020
+#define RES_V_POS                              0x10
+#define DBI_RESOLUTION_REG             0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG       0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG     0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG    0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG    0xb034
+#define VERT_SYNC_PAD_COUNT_REG                0xb038
+#define VERT_BACK_PORCH_COUNT_REG      0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG     0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG      0xb044
+#define DPI_CONTROL_REG                        0xb048
+#define DPI_SHUT_DOWN                          (1 << 0)
+#define DPI_TURN_ON                            (1 << 1)
+#define DPI_COLOR_MODE_ON                      (1 << 2)
+#define DPI_COLOR_MODE_OFF                     (1 << 3)
+#define DPI_BACK_LIGHT_ON                      (1 << 4)
+#define DPI_BACK_LIGHT_OFF                     (1 << 5)
+#define DPI_LP                                 (1 << 6)
+#define DPI_DATA_REG                   0xb04c
+#define DPI_BACK_LIGHT_ON_DATA                 0x07
+#define DPI_BACK_LIGHT_OFF_DATA                        0x17
+#define INIT_COUNT_REG                 0xb050
+#define MAX_RET_PAK_REG                        0xb054
+#define VIDEO_FMT_REG                  0xb058
+#define COMPLETE_LAST_PCKT                     (1 << 2)
+#define EOT_DISABLE_REG                        0xb05c
+#define ENABLE_CLOCK_STOPPING                  (1 << 1)
+#define LP_BYTECLK_REG                 0xb060
+#define LP_GEN_DATA_REG                        0xb064
+#define HS_GEN_DATA_REG                        0xb068
+#define LP_GEN_CTRL_REG                        0xb06C
+#define HS_GEN_CTRL_REG                        0xb070
+#define DCS_CHANNEL_NUMBER_POS         0x6
+#define MCS_COMMANDS_POS               0x8
+#define WORD_COUNTS_POS                        0x8
+#define MCS_PARAMETER_POS                      0x10
+#define GEN_FIFO_STAT_REG              0xb074
+#define HS_DATA_FIFO_FULL                      (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY                        (1 << 1)
+#define HS_DATA_FIFO_EMPTY                     (1 << 2)
+#define LP_DATA_FIFO_FULL                      (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY                        (1 << 9)
+#define LP_DATA_FIFO_EMPTY                     (1 << 10)
+#define HS_CTRL_FIFO_FULL                      (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY                        (1 << 17)
+#define HS_CTRL_FIFO_EMPTY                     (1 << 18)
+#define LP_CTRL_FIFO_FULL                      (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY                        (1 << 25)
+#define LP_CTRL_FIFO_EMPTY                     (1 << 26)
+#define DBI_FIFO_EMPTY                         (1 << 27)
+#define DPI_FIFO_EMPTY                         (1 << 28)
+#define HS_LS_DBI_ENABLE_REG           0xb078
+#define TXCLKESC_REG                   0xb07c
+#define DPHY_PARAM_REG                 0xb080
+#define DBI_BW_CTRL_REG                        0xb084
+#define CLK_LANE_SWT_REG               0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG               0xb104
+#define MIPI_2X_CLOCK_BITS                     ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG          0xb108
+#define MIPI_DATA_LENGTH_REG           0xb10C
+#define MIPI_COMMAND_ADDRESS_REG       0xb110
+#define MIPI_COMMAND_LENGTH_REG                0xb114
+#define MIPI_READ_DATA_RETURN_REG0     0xb118
+#define MIPI_READ_DATA_RETURN_REG1     0xb11C
+#define MIPI_READ_DATA_RETURN_REG2     0xb120
+#define MIPI_READ_DATA_RETURN_REG3     0xb124
+#define MIPI_READ_DATA_RETURN_REG4     0xb128
+#define MIPI_READ_DATA_RETURN_REG5     0xb12C
+#define MIPI_READ_DATA_RETURN_REG6     0xb130
+#define MIPI_READ_DATA_RETURN_REG7     0xb134
+#define MIPI_READ_DATA_VALID_REG       0xb138
+
+/* DBI COMMANDS */
+#define soft_reset                     0x01
+/*
+ *     The display module performs a software reset.
+ *     Registers are written with their SW Reset default values.
+ */
+#define get_power_mode                 0x0a
+/*
+ *     The display module returns the current power mode
+ */
+#define get_address_mode               0x0b
+/*
+ *     The display module returns the current status.
+ */
+#define get_pixel_format               0x0c
+/*
+ *     This command gets the pixel format for the RGB image data
+ *     used by the interface.
+ */
+#define get_display_mode               0x0d
+/*
+ *     The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode                        0x0e
+/*
+ *     The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result          0x0f
+/*
+ *     The display module returns the self-diagnostic results following
+ *     a Sleep Out command.
+ */
+#define enter_sleep_mode               0x10
+/*
+ *     This command causes the display module to enter the Sleep mode.
+ *     In this mode, all unnecessary blocks inside the display module are
+ *     disabled except interface communication. This is the lowest power
+ *     mode the display module supports.
+ */
+#define exit_sleep_mode                        0x11
+/*
+ *     This command causes the display module to exit Sleep mode.
+ *     All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode             0x12
+/*
+ *     This command causes the display module to enter the Partial Display
+ *     Mode. The Partial Display Mode window is described by the
+ *     set_partial_area command.
+ */
+#define enter_normal_mode              0x13
+/*
+ *     This command causes the display module to enter the Normal mode.
+ *     Normal Mode is defined as Partial Display mode and Scroll mode are off
+ */
+#define exit_invert_mode               0x20
+/*
+ *     This command causes the display module to stop inverting the image
+ *     data on the display device. The frame memory contents remain unchanged.
+ *     No status bits are changed.
+ */
+#define enter_invert_mode              0x21
+/*
+ *     This command causes the display module to invert the image data only on
+ *     the display device. The frame memory contents remain unchanged.
+ *     No status bits are changed.
+ */
+#define set_gamma_curve                        0x26
+/*
+ *     This command selects the desired gamma curve for the display device.
+ *     Four fixed gamma curves are defined in section DCS spec.
+ */
+#define set_display_off                        0x28
+/* ************************************************************************* *\
+This command causes the display module to stop displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_display_on                 0x29
+/* ************************************************************************* *\
+This command causes the display module to start displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_column_address             0x2a
+/*
+ *     This command defines the column extent of the frame memory accessed by
+ *     the hostprocessor with the read_memory_continue and
+ *     write_memory_continue commands.
+ *     No status bits are changed.
+ */
+#define set_page_addr                  0x2b
+/*
+ *     This command defines the page extent of the frame memory accessed by
+ *     the host processor with the write_memory_continue and
+ *     read_memory_continue command.
+ *     No status bits are changed.
+ */
+#define write_mem_start                        0x2c
+/*
+ *     This command transfers image data from the host processor to the
+ *     display module's frame memory starting at the pixel location specified
+ *     by preceding set_column_address and set_page_address commands.
+ */
+#define set_partial_area               0x30
+/*
+ *     This command defines the Partial Display mode's display area.
+ *     There are two parameters associated with this command, the first
+ *     defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ *     refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area                        0x33
+/*
+ *     This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off                   0x34
+/*
+ *     This command turns off the display module's Tearing Effect output
+ *     signal on the TE signal line.
+ */
+#define set_tear_on                    0x35
+/*
+ *     This command turns on the display module's Tearing Effect output signal
+ *     on the TE signal line.
+ */
+#define set_address_mode               0x36
+/*
+ *     This command sets the data order for transfers from the host processor
+ *     to the display module's frame memory, bits B[7:5] and B3, and from the
+ *     display module's frame memory to the display device, bits B[2:0] and B4.
+ */
+#define set_scroll_start               0x37
+/*
+ *     This command sets the start of the vertical scrolling area in the frame
+ *     memory. The vertical scrolling area is fully defined when this command
+ *     is used with the set_scroll_area command. The set_scroll_start command
+ *     has one parameter, the Vertical Scroll Pointer. The VSP defines the
+ *     line in the frame memory that is written to the display device as the
+ *     first line of the vertical scroll area.
+ */
+#define exit_idle_mode                 0x38
+/*
+ *     This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode                        0x39
+/*
+ *     This command causes the display module to enter Idle Mode.
+ *     In Idle Mode, color expression is reduced. Colors are shown on the
+ *     display device using the MSB of each of the R, G and B color
+ *     components in the frame memory
+ */
+#define set_pixel_format               0x3a
+/*
+ *     This command sets the pixel format for the RGB image data used by the
+ *     interface.
+ *     Bits D[6:4]  DPI Pixel Format Definition
+ *     Bits D[2:0]  DBI Pixel Format Definition
+ *     Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp          0x1
+#define DCS_PIXEL_FORMAT_8bpp          0x2
+#define DCS_PIXEL_FORMAT_12bpp         0x3
+#define DCS_PIXEL_FORMAT_16bpp         0x5
+#define DCS_PIXEL_FORMAT_18bpp         0x6
+#define DCS_PIXEL_FORMAT_24bpp         0x7
+
+#define write_mem_cont                 0x3c
+
+/*
+ *     This command transfers image data from the host processor to the
+ *     display module's frame memory continuing from the pixel location
+ *     following the previous write_memory_continue or write_memory_start
+ *     command.
+ */
+#define set_tear_scanline              0x44
+/*
+ *     This command turns on the display module's Tearing Effect output signal
+ *     on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline                   0x45
+/*
+ *     The display module returns the current scanline, N, used to update the
+ *      display device. The total number of scanlines on a display device is
+ *     defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+ *     the first line of V Sync and is denoted as Line 0.
+ *     When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0      0x03  /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1      0x13  /* generic short write, 1 parameter */
+#define GEN_SHORT_WRITE_2      0x23  /* generic short write, 2 parameters */
+#define GEN_READ_0             0x04  /* generic read, no parameters */
+#define GEN_READ_1             0x14  /* generic read, 1 parameter */
+#define GEN_READ_2             0x24  /* generic read, 2 parameters */
+#define GEN_LONG_WRITE         0x29  /* generic long write */
+#define MCS_SHORT_WRITE_0      0x05  /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1      0x15  /* MCS short write, 1 parameter */
+#define MCS_READ               0x06  /* MCS read, no parameters */
+#define MCS_LONG_WRITE         0x39  /* MCS long write */
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile          0x50
+#define write_display_brightness       0x51
+#define write_ctrl_display             0x53
+#define write_ctrl_cabc                        0x55
+  #define UI_IMAGE             0x01
+  #define STILL_IMAGE          0x02
+  #define MOVING_IMAGE         0x03
+#define write_hysteresis               0x57
+#define write_gamma_setting            0x58
+#define write_cabc_min_bright          0x5e
+#define write_kbbc_profile             0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ *     This command is used to control ambient light, panel backlight
+ *     brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON   (1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
+#define DISPLAY_DIMMING_ON     (1 << 3)
+#define BACKLIGHT_ON           (1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO        (1 << 1)
+#define GAMMA_AUTO             (1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP  0x1
+#define DCS_PIXEL_FORMAT_8BPP  0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data         0xfc
+#define diag_res_data          0x00
+#define disp_mode_data         0x23
+#define pxl_fmt_data           0x77
+#define pwr_mode_data          0x74
+#define sig_mode_data          0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1         0xff
+#define scanline_data2         0xff
+#define NON_BURST_MODE_SYNC_PULSE      0x01    /* Non Burst Mode
+                                                * with Sync Pulse
+                                                */
+#define NON_BURST_MODE_SYNC_EVENTS     0x02    /* Non Burst Mode
+                                                * with Sync events
+                                                */
+#define BURST_MODE                     0x03    /* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE                0x240   /* 0x32 */    /* 0x120 */
+                                               /* Allocate at least
+                                                * 0x100 Byte with 32
+                                                * byte alignment
+                                                */
+#define DBI_DATA_BUFFER_SIZE           0x120   /* Allocate at least
+                                                * 0x100 Byte with 32
+                                                * byte alignment
+                                                */
+#define DBI_CB_TIME_OUT                        0xFFFF
+
+#define GEN_FB_TIME_OUT                        2000
+
+#define SKU_83                         0x01
+#define SKU_100                                0x02
+#define SKU_100L                       0x04
+#define SKU_BYPASS                     0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
+/* PCI config space */
+
+#define SB_PCKT         0x02100 /* cedarview */
+# define SB_OPCODE_MASK                         PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT                        16
+# define SB_OPCODE_READ                         0
+# define SB_OPCODE_WRITE                        1
+# define SB_DEST_MASK                           PSB_MASK(15, 8)
+# define SB_DEST_SHIFT                          8
+# define SB_DEST_DPLL                           0x88
+# define SB_BYTE_ENABLE_MASK                    PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT                   4
+# define SB_BUSY                                (1 << 0)
+
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA                0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR                0x02108 /* cedarview */
+#define DPIO_CFG       0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1                    (1 << 3)
+# define DPIO_MODE_SELECT_0                    (1 << 2)
+# define DPIO_SFR_BYPASS                       (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N                      (1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A                        0x8008
+#define _SB_M_B                        0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK                     (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT                    24
+
+#define _SB_N_VCO_A            0x8014
+#define _SB_N_VCO_B            0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK                      PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT                     30
+#define SB_N_DIVIDER_MASK                      PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT                     26
+#define SB_N_CB_TUNE_MASK                      PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT                     24
+
+#define _SB_REF_A              0x8018
+#define _SB_REF_B              0x8038
+#define SB_REF_SFR(pipe)       _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A                        0x801c
+#define _SB_P_B                        0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK                     PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT                    30
+#define SB_P2_10                               0 /* HDMI, DP, DAC */
+#define SB_P2_5                                1 /* DAC */
+#define SB_P2_14                               2 /* LVDS single */
+#define SB_P2_7                                3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK                     PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT                    12
+
+#define PSB_LANE0              0x120
+#define PSB_LANE1              0x220
+#define PSB_LANE2              0x2320
+#define PSB_LANE3              0x2420
+
+#define LANE_PLL_MASK          (0x7 << 20)
+#define LANE_PLL_ENABLE                (0x3 << 20)
+
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644 (file)
index 0000000..4882b29
--- /dev/null
@@ -0,0 +1,2617 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
/* Groups of SDVO output flags, by connector family. */
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)

#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
			  SDVO_TV_MASK)

/*
 * Connector-type tests on a psb_intel_sdvo_connector pointer.  The
 * macro argument is parenthesized so any pointer expression is safe.
 */
#define IS_TV(c)	((c)->output_flag & SDVO_TV_MASK)
#define IS_TMDS(c)	((c)->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c)	((c)->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) ((c)->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
/*
 * Human-readable TV format names; indexed by the driver's
 * tv_format_index.  Table is pointer-const as well as data-const
 * since nothing ever rewrites an entry.
 */
static const char * const tv_format_names[] = {
	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
	"SECAM_60"
};

/* Number of entries in tv_format_names. */
#define TV_FORMAT_NUM	(sizeof(tv_format_names) / sizeof(*tv_format_names))
+
/*
 * Per-encoder state for one SDVO device: the i2c link used for command
 * traffic, the device's reported capabilities, and which output type
 * (TMDS/LVDS/TV/RGB) is currently driven.
 */
struct psb_intel_sdvo {
       struct psb_intel_encoder base;

       /* Bus carrying SDVO command traffic, and the device address on it. */
       struct i2c_adapter *i2c;
       u8 slave_addr;

       /* Adapter handle for DDC (EDID) traffic; see also ddc_bus below.
        * NOTE(review): presumably routed through the SDVO device — confirm. */
       struct i2c_adapter ddc;

       /* Register for the SDVO device: SDVOB or SDVOC */
       int sdvo_reg;

       /* Active outputs controlled by this SDVO output */
       uint16_t controlled_output;

       /*
        * Capabilities of the SDVO device returned by
        * i830_sdvo_get_capabilities()
        */
       struct psb_intel_sdvo_caps caps;

       /* Pixel clock limitations reported by the SDVO device, in kHz */
       int pixel_clock_min, pixel_clock_max;

       /*
       * For multiple function SDVO device,
       * this is for current attached outputs.
       */
       uint16_t attached_output;

       /**
        * This is used to select the color range of RBG outputs in HDMI mode.
        * It is only valid when using TMDS encoding and 8 bit per color mode.
        */
       uint32_t color_range;

       /**
        * This is set if we're going to treat the device as TV-out.
        *
        * While we have these nice friendly flags for output types that ought
        * to decide this for us, the S-Video output on our HDMI+S-Video card
        * shows up as RGB1 (VGA).
        */
       bool is_tv;

       /* This is for current tv format name */
       int tv_format_index;

       /**
        * This is set if we treat the device as HDMI, instead of DVI.
        */
       bool is_hdmi;
       bool has_hdmi_monitor;
       bool has_hdmi_audio;

       /**
        * This is set if we detect output of sdvo device as LVDS and
        * have a valid fixed mode to use with the panel.
        */
       bool is_lvds;

       /**
        * This is sdvo fixed panel mode pointer
        */
       struct drm_display_mode *sdvo_lvds_fixed_mode;

       /* DDC bus used by this SDVO encoder */
       uint8_t ddc_bus;

       /* Input timings for adjusted_mode */
       struct psb_intel_sdvo_dtd input_dtd;
};
+
/*
 * Per-connector state for an SDVO output: the output type, the DRM
 * properties exposed for TV/LVDS tuning, and the current/maximum value
 * cached for each of those properties.
 */
struct psb_intel_sdvo_connector {
       struct psb_intel_connector base;

       /* Mark the type of connector */
       uint16_t output_flag;

       int force_audio;

       /* This contains all current supported TV format */
       u8 tv_format_supported[TV_FORMAT_NUM];
       int   format_supported_num;
       struct drm_property *tv_format;

       /* add the property for the SDVO-TV */
       struct drm_property *left;
       struct drm_property *right;
       struct drm_property *top;
       struct drm_property *bottom;
       struct drm_property *hpos;
       struct drm_property *vpos;
       struct drm_property *contrast;
       struct drm_property *saturation;
       struct drm_property *hue;
       struct drm_property *sharpness;
       struct drm_property *flicker_filter;
       struct drm_property *flicker_filter_adaptive;
       struct drm_property *flicker_filter_2d;
       struct drm_property *tv_chroma_filter;
       struct drm_property *tv_luma_filter;
       struct drm_property *dot_crawl;

       /* add the property for the SDVO-TV/LVDS */
       struct drm_property *brightness;

       /* Add variable to record current setting for the above property */
       u32     left_margin, right_margin, top_margin, bottom_margin;

       /* this is to get the range of margin.*/
       u32     max_hscan,  max_vscan;
       u32     max_hpos, cur_hpos;
       u32     max_vpos, cur_vpos;
       u32     cur_brightness, max_brightness;
       u32     cur_contrast,   max_contrast;
       u32     cur_saturation, max_saturation;
       u32     cur_hue,        max_hue;
       u32     cur_sharpness,  max_sharpness;
       u32     cur_flicker_filter,             max_flicker_filter;
       u32     cur_flicker_filter_adaptive,    max_flicker_filter_adaptive;
       u32     cur_flicker_filter_2d,          max_flicker_filter_2d;
       u32     cur_tv_chroma_filter,   max_tv_chroma_filter;
       u32     cur_tv_luma_filter,     max_tv_luma_filter;
       u32     cur_dot_crawl,  max_dot_crawl;
};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+       return container_of(psb_intel_attached_encoder(connector),
+                           struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+       return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
/* Forward declarations — these are defined later in this file. */
static bool
psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
static bool
psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
                              struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
                              int type);
static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
                                   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       u32 bval = val, cval = val;
+       int i;
+
+       if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+               cval = REG_READ(SDVOC);
+       } else {
+               bval = REG_READ(SDVOB);
+       }
+       /*
+        * Write the registers twice for luck. Sometimes,
+        * writing them only once doesn't appear to 'stick'.
+        * The BIOS does this too. Yay, magic
+        */
+       for (i = 0; i < 2; i++)
+       {
+               REG_WRITE(SDVOB, bval);
+               REG_READ(SDVOB);
+               REG_WRITE(SDVOC, cval);
+               REG_READ(SDVOC);
+       }
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = psb_intel_sdvo->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = &addr,
+               },
+               {
+                       .addr = psb_intel_sdvo->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = ch,
+               }
+       };
+       int ret;
+
+       if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
+               return true;
+
+       DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+       return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+       u8 cmd;
+       const char *name;
+} sdvo_cmd_names[] = {
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+    /* Add the op code for SDVO enhancements */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+    /* HDMI op code */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
/* True when @reg addresses the SDVOB encoder register. */
#define IS_SDVOB(reg)	((reg) == SDVOB)
/* Debug name ("SDVOB"/"SDVOC") for the encoder behind @sdvo. */
#define SDVO_NAME(sdvo)	(IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+                                  const void *args, int args_len)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s: W: %02X ",
+                               SDVO_NAME(psb_intel_sdvo), cmd);
+       for (i = 0; i < args_len; i++)
+               DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+       for (; i < 8; i++)
+               DRM_LOG_KMS("   ");
+       for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+               if (cmd == sdvo_cmd_names[i].cmd) {
+                       DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(sdvo_cmd_names))
+               DRM_LOG_KMS("(%02X)", cmd);
+       DRM_LOG_KMS("\n");
+}
+
/*
 * Names for the command status byte read back from the device, indexed
 * by the status value (valid up to SDVO_CMD_STATUS_SCALING_NOT_SUPP).
 * Pointer-const as well as data-const: entries are never rewritten.
 */
static const char * const cmd_status_names[] = {
	"Power on",
	"Success",
	"Not supported",
	"Invalid arg",
	"Pending",
	"Target not specified",
	"Scaling not supported"
};
+
/*
 * Send @cmd with @args_len argument bytes to the SDVO device.  Each
 * argument byte is written as a (register, value) pair, then the opcode
 * itself, then the command-status register is addressed and read back,
 * all in one i2c transaction.  Returns false on any transfer error.
 */
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
                                const void *args, int args_len)
{
       /* NOTE(review): VLAs on the kernel stack; assumes args_len stays
        * small (argument registers only go up to SDVO_I2C_ARG_0) — confirm
        * against all callers. */
       u8 buf[args_len*2 + 2], status;
       struct i2c_msg msgs[args_len + 3];
       int i, ret;

       psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);

       /* One write message per argument byte: (arg register, value). */
       for (i = 0; i < args_len; i++) {
               msgs[i].addr = psb_intel_sdvo->slave_addr;
               msgs[i].flags = 0;
               msgs[i].len = 2;
               msgs[i].buf = buf + 2 *i;
               buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
               buf[2*i + 1] = ((u8*)args)[i];
       }
       /* Then the opcode write that triggers command execution. */
       msgs[i].addr = psb_intel_sdvo->slave_addr;
       msgs[i].flags = 0;
       msgs[i].len = 2;
       msgs[i].buf = buf + 2*i;
       buf[2*i + 0] = SDVO_I2C_OPCODE;
       buf[2*i + 1] = cmd;

       /* the following two are to read the response */
       status = SDVO_I2C_CMD_STATUS;
       msgs[i+1].addr = psb_intel_sdvo->slave_addr;
       msgs[i+1].flags = 0;
       msgs[i+1].len = 1;
       msgs[i+1].buf = &status;

       msgs[i+2].addr = psb_intel_sdvo->slave_addr;
       msgs[i+2].flags = I2C_M_RD;
       msgs[i+2].len = 1;
       msgs[i+2].buf = &status;

       ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
       if (ret < 0) {
               DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
               return false;
       }
       /* A short transfer (fewer messages than queued) is also a failure. */
       if (ret != i+3) {
               /* failure in I2C transfer */
               DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
               return false;
       }

       return true;
}
+
/*
 * Poll the command status byte until the device leaves PENDING (one
 * initial read plus up to 5 retries, 15us apart), then read
 * @response_len response bytes.  Returns false on i2c failure or any
 * status other than SUCCESS.
 */
static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
                                    void *response, int response_len)
{
       u8 retry = 5;
       u8 status;
       int i;

       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));

       /*
        * The documentation states that all commands will be
        * processed within 15µs, and that we need only poll
        * the status byte a maximum of 3 times in order for the
        * command to be complete.
        *
        * Check 5 times in case the hardware failed to read the docs.
        */
       if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                                 SDVO_I2C_CMD_STATUS,
                                 &status))
               goto log_fail;

       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
               udelay(15);
               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                                         SDVO_I2C_CMD_STATUS,
                                         &status))
                       goto log_fail;
       }

       /* Log the status symbolically when it is in range of the name table. */
       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
               DRM_LOG_KMS("(%s)", cmd_status_names[status]);
       else
               DRM_LOG_KMS("(??? %d)", status);

       if (status != SDVO_CMD_STATUS_SUCCESS)
               goto log_fail;

       /* Read the command response */
       for (i = 0; i < response_len; i++) {
               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                                         SDVO_I2C_RETURN_0 + i,
                                         &((u8 *)response)[i]))
                       goto log_fail;
               DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
       }
       DRM_LOG_KMS("\n");
       return true;

log_fail:
       DRM_LOG_KMS("... failed\n");
       return false;
}
+
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+       if (mode->clock >= 100000)
+               return 1;
+       else if (mode->clock >= 50000)
+               return 2;
+       else
+               return 4;
+}
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+                                             u8 ddc_bus)
+{
+       /* This must be the immediately preceding write before the i2c xfer */
+       return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+                                   SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+                                   &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+               return false;
+
+       return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+               return false;
+
+       return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       struct psb_intel_sdvo_set_target_input_args targets = {0};
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_TARGET_INPUT,
+                                   &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+       struct psb_intel_sdvo_get_trained_inputs_response response;
+
+       BUILD_BUG_ON(sizeof(response) != 1);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+                                 &response, sizeof(response)))
+               return false;
+
+       *input_1 = response.input0_trained;
+       *input_2 = response.input1_trained;
+       return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+                                         u16 outputs)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_ACTIVE_OUTPUTS,
+                                   &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+                                              int mode)
+{
+       u8 state = SDVO_ENCODER_STATE_ON;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               state = SDVO_ENCODER_STATE_ON;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               state = SDVO_ENCODER_STATE_STANDBY;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               state = SDVO_ENCODER_STATE_SUSPEND;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               state = SDVO_ENCODER_STATE_OFF;
+               break;
+       }
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+                                                  int *clock_min,
+                                                  int *clock_max)
+{
+       struct psb_intel_sdvo_pixel_clock_range clocks;
+
+       BUILD_BUG_ON(sizeof(clocks) != 4);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+                                 &clocks, sizeof(clocks)))
+               return false;
+
+       /* Convert the values from units of 10 kHz to kHz. */
+       *clock_min = clocks.min * 10;
+       *clock_max = clocks.max * 10;
+       return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        u16 outputs)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_TARGET_OUTPUT,
+                                   &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+                                 struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+               psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+                                    SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+                                    SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        uint16_t clock,
+                                        uint16_t width,
+                                        uint16_t height)
+{
+       struct psb_intel_sdvo_preferred_input_timing_args args;
+
+       memset(&args, 0, sizeof(args));
+       args.clock = clock;
+       args.width = width;
+       args.height = height;
+       args.interlace = 0;
+
+       if (psb_intel_sdvo->is_lvds &&
+          (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+           psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+               args.scaled = 1;
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+                                   &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                                 struct psb_intel_sdvo_dtd *dtd)
+{
+       BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+       BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+                                   &dtd->part1, sizeof(dtd->part1)) &&
+               psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+                                    &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
/*
 * Pack a drm_display_mode's crtc_* timings into the SDVO DTD layout:
 * low 8 bits of each value in its own byte, with the high bits folded
 * into shared nibble fields (h_high/v_high/sync_off_width_high).
 */
static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
                                        const struct drm_display_mode *mode)
{
       uint16_t width, height;
       uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
       uint16_t h_sync_offset, v_sync_offset;

       width = mode->crtc_hdisplay;
       height = mode->crtc_vdisplay;

       /* do some mode translations */
       h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
       h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;

       v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
       v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;

       h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
       v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;

       /* DTD clock is in units of 10 kHz; mode->clock is in kHz. */
       dtd->part1.clock = mode->clock / 10;
       dtd->part1.h_active = width & 0xff;
       dtd->part1.h_blank = h_blank_len & 0xff;
       /* High nibbles of active/blank share one byte. */
       dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
               ((h_blank_len >> 8) & 0xf);
       dtd->part1.v_active = height & 0xff;
       dtd->part1.v_blank = v_blank_len & 0xff;
       dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
               ((v_blank_len >> 8) & 0xf);

       dtd->part2.h_sync_off = h_sync_offset & 0xff;
       dtd->part2.h_sync_width = h_sync_len & 0xff;
       dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
               (v_sync_len & 0xf);
       /* Overflow bits (8-9 of h fields, 4-5 of v fields) packed together. */
       dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
               ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
               ((v_sync_len & 0x30) >> 4);

       /* 0x18 is the base flag byte for this DTD.
        * NOTE(review): meaning of bits 3-4 not derivable here — check the
        * SDVO spec.  Bits 1/2 add positive h/vsync polarity below. */
       dtd->part2.dtd_flags = 0x18;
       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
               dtd->part2.dtd_flags |= 0x2;
       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
               dtd->part2.dtd_flags |= 0x4;

       dtd->part2.sdvo_flags = 0;
       dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
       dtd->part2.reserved = 0;
}
+
/*
 * Inverse of psb_intel_sdvo_get_dtd_from_mode: unpack an SDVO DTD's
 * split low-byte/high-nibble fields back into drm_display_mode timings.
 */
static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
                                        const struct psb_intel_sdvo_dtd *dtd)
{
       mode->hdisplay = dtd->part1.h_active;
       mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
       mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
       mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
       mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
       mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
       mode->htotal = mode->hdisplay + dtd->part1.h_blank;
       mode->htotal += (dtd->part1.h_high & 0xf) << 8;

       mode->vdisplay = dtd->part1.v_active;
       mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
       mode->vsync_start = mode->vdisplay;
       mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
       mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
       mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
       mode->vsync_end = mode->vsync_start +
               (dtd->part2.v_sync_off_width & 0xf);
       mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
       mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
       mode->vtotal += (dtd->part1.v_high & 0xf) << 8;

       /* DTD clock is in units of 10 kHz; mode->clock is in kHz. */
       mode->clock = dtd->part1.clock * 10;

       /* dtd_flags bits 1/2 encode positive h/vsync polarity. */
       mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
       if (dtd->part2.dtd_flags & 0x2)
               mode->flags |= DRM_MODE_FLAG_PHSYNC;
       if (dtd->part2.dtd_flags & 0x4)
               mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       struct psb_intel_sdvo_encode encode;
+
+       BUILD_BUG_ON(sizeof(encode) != 2);
+       return psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_SUPP_ENCODE,
+                                 &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                 uint8_t mode)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+                                      uint8_t mode)
+{
+       /* Program the colorimetry mode (one-byte payload). */
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY,
+                                       &mode, sizeof(mode));
+}
+
+#if 0
+/* Debug helper: dump every HDMI info-frame buffer the encoder exposes.
+ *
+ * NOTE(review): this disabled code references an identifier "encoder" that
+ * is not in scope (the parameter is psb_intel_sdvo), so it would not compile
+ * if re-enabled — rename before taking it out of #if 0.
+ */
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       int i, j;
+       uint8_t set_buf_index[2];
+       uint8_t av_split;
+       uint8_t buf_size;
+       uint8_t buf[48];
+       uint8_t *pos;
+
+       /* Number of buffers to walk comes from the AV split report. */
+       psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+       for (i = 0; i <= av_split; i++) {
+               set_buf_index[0] = i; set_buf_index[1] = 0;
+               psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+                                    set_buf_index, 2);
+               psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+               psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+               /* Read each buffer back eight bytes per response. */
+               pos = buf;
+               for (j = 0; j <= buf_size; j += 8) {
+                       psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+                                            NULL, 0);
+                       psb_intel_sdvo_read_response(encoder, pos, 8);
+                       pos += 8;
+               }
+       }
+}
+#endif
+
+/* Upload the AVI info-frame to the encoder's HDMI buffer.
+ *
+ * HDMI info-frames are not wired up in this driver yet: the live path only
+ * logs a message and reports failure.  The #if 0 body below is the intended
+ * implementation, kept for reference.
+ */
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       DRM_INFO("HDMI is not supported yet");
+
+       return false;
+#if 0
+       struct dip_infoframe avi_if = {
+               .type = DIP_TYPE_AVI,
+               .ver = DIP_VERSION_AVI,
+               .len = DIP_LEN_AVI,
+       };
+       uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+       uint8_t set_buf_index[2] = { 1, 0 };
+       uint64_t *data = (uint64_t *)&avi_if;
+       unsigned i;
+
+       intel_dip_infoframe_csum(&avi_if);
+
+       /* Select HDMI buffer index 1, upload the frame eight bytes at a
+        * time, then arm transmission on every vsync.
+        */
+       if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                 SDVO_CMD_SET_HBUF_INDEX,
+                                 set_buf_index, 2))
+               return false;
+
+       for (i = 0; i < sizeof(avi_if); i += 8) {
+               if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                         SDVO_CMD_SET_HBUF_DATA,
+                                         data, 8))
+                       return false;
+               data++;
+       }
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_HBUF_TXRATE,
+                                   &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       uint32_t mask = 1 << psb_intel_sdvo->tv_format_index;
+       struct psb_intel_sdvo_tv_format fmt;
+
+       /* The TV format command block is exactly six bytes on the wire. */
+       BUILD_BUG_ON(sizeof(fmt) != 6);
+
+       /* One-hot encode the selected TV format into the command block,
+        * copying only as many mask bytes as fit.
+        */
+       memset(&fmt, 0, sizeof(fmt));
+       memcpy(&fmt, &mask, min(sizeof(fmt), sizeof(mask)));
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_TV_FORMAT,
+                                       &fmt, sizeof(fmt));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct drm_display_mode *mode)
+{
+       struct psb_intel_sdvo_dtd dtd;
+
+       /* Route subsequent timing commands at the attached output... */
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                         psb_intel_sdvo->attached_output))
+               return false;
+
+       /* ...then program it with a DTD derived from the requested mode. */
+       psb_intel_sdvo_get_dtd_from_mode(&dtd, mode);
+       return psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &dtd);
+}
+
+/*
+ * Ask the SDVO device for its preferred input timing for @mode and write the
+ * result into @adjusted_mode (also cached in psb_intel_sdvo->input_dtd for
+ * later use in mode_set).  The command order (target input -> create
+ * preferred timing -> read it back) mirrors the device protocol; preserved
+ * as-is.  Returns false if any step fails, in which case @adjusted_mode is
+ * left untouched.
+ */
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct drm_display_mode *mode,
+                                       struct drm_display_mode *adjusted_mode)
+{
+       /* Reset the input timing to the screen. Assume always input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               return false;
+
+       if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+                                                     mode->clock / 10,
+                                                     mode->hdisplay,
+                                                     mode->vdisplay))
+               return false;
+
+       if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+                                                  &psb_intel_sdvo->input_dtd))
+               return false;
+
+       psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+       /* Recompute the CRTC-internal timing fields for the new mode. */
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+       return true;
+}
+
+/* Encoder mode_fixup hook: derive the adjusted (input) timing from the
+ * requested mode.  For TV the requested mode itself is programmed as the
+ * output timing; for LVDS the panel's fixed mode is.  The device then
+ * reports its preferred input timing, which overwrites @adjusted_mode.
+ * Failures of the input-timing step are deliberately ignored (the casts to
+ * void), leaving @adjusted_mode unmodified.  Finally the pixel multiplier
+ * is stashed in @adjusted_mode for the CRTC code.
+ */
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       int multiplier;
+
+       /* We need to construct preferred input timings based on our
+        * output timings.  To do that, we have to set the output
+        * timings, even though this isn't really the right place in
+        * the sequence to do it. Oh well.
+        */
+       if (psb_intel_sdvo->is_tv) {
+               if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+                       return false;
+
+               (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
+       } else if (psb_intel_sdvo->is_lvds) {
+               if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+                                                            psb_intel_sdvo->sdvo_lvds_fixed_mode))
+                       return false;
+
+               (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
+       }
+
+       /* Make the CRTC code factor in the SDVO pixel multiplier.  The
+        * SDVO device will factor out the multiplier during mode_set.
+        */
+       multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+       psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+       return true;
+}
+
+/* Encoder mode_set hook: program the SDVO device and the SDVO port register
+ * for @adjusted_mode.  The sequence of device commands (in/out map, target
+ * output, output timing, target input, encode mode, TV format, input timing,
+ * clock-rate multiplier) is preserved exactly; the order appears to matter,
+ * so do not reorder.
+ */
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       u32 sdvox;
+       struct psb_intel_sdvo_in_out_map in_out;
+       struct psb_intel_sdvo_dtd input_dtd;
+       int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+       int rate;
+
+       if (!mode)
+               return;
+
+       /* First, set the input mapping for the first input to our controlled
+        * output. This is only correct if we're a single-input device, in
+        * which case the first input is the output from the appropriate SDVO
+        * channel on the motherboard.  In a two-input device, the first input
+        * will be SDVOB and the second SDVOC.
+        */
+       in_out.in0 = psb_intel_sdvo->attached_output;
+       in_out.in1 = 0;
+
+       psb_intel_sdvo_set_value(psb_intel_sdvo,
+                            SDVO_CMD_SET_IN_OUT_MAP,
+                            &in_out, sizeof(in_out));
+
+       /* Set the output timings to the screen */
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                         psb_intel_sdvo->attached_output))
+               return;
+
+       /* We have tried to get input timing in mode_fixup, and filled into
+        * adjusted_mode.
+        */
+       if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+               input_dtd = psb_intel_sdvo->input_dtd;
+       } else {
+               /* Set the output timing to the screen */
+               /* NOTE(review): this repeats the set_target_output call made
+                * just above with the same argument; presumably redundant —
+                * confirm before removing.
+                */
+               if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                                 psb_intel_sdvo->attached_output))
+                       return;
+
+               psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+               (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+       }
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               return;
+
+       /* HDMI sinks get HDMI encode plus colorimetry and the (currently
+        * stubbed) AVI info-frame; everything else falls back to DVI encode.
+        */
+       if (psb_intel_sdvo->has_hdmi_monitor) {
+               psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+               psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+                                          SDVO_COLORIMETRY_RGB256);
+               psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+       } else
+               psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+       if (psb_intel_sdvo->is_tv &&
+           !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+               return;
+
+       (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+       /* Map the pixel multiplier to the device's clock-rate-mult encoding;
+        * unsupported values fall back to 1x.
+        */
+       switch (pixel_multiplier) {
+       default:
+       case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+       case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+       case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+       }
+       if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+               return;
+
+       /* Set the SDVO control regs. */
+       sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+       switch (psb_intel_sdvo->sdvo_reg) {
+       case SDVOB:
+               sdvox &= SDVOB_PRESERVE_MASK;
+               break;
+       case SDVOC:
+               sdvox &= SDVOC_PRESERVE_MASK;
+               break;
+       }
+       /* NOTE(review): the (9 << 19) magic bits are inherited as-is; their
+        * meaning is not evident from this file.
+        */
+       sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+       if (psb_intel_crtc->pipe == 1)
+               sdvox |= SDVO_PIPE_B_SELECT;
+       if (psb_intel_sdvo->has_hdmi_audio)
+               sdvox |= SDVO_AUDIO_ENABLE;
+
+       /* FIXME: Check if this is needed for PSB
+       sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+       */
+
+       if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+               sdvox |= SDVO_STALL_SELECT;
+       psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+/* Encoder DPMS hook: enable or disable the SDVO outputs and port register.
+ * Any mode other than DPMS_ON deactivates the outputs (and gates the port
+ * for full DPMS_OFF); DPMS_ON re-enables the port, waits two vblanks for
+ * the input to train, then re-activates the attached output.
+ */
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       u32 temp;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               DRM_DEBUG("DPMS_ON");
+               break;
+       case DRM_MODE_DPMS_OFF:
+               DRM_DEBUG("DPMS_OFF");
+               break;
+       default:
+               DRM_DEBUG("DPMS: %d", mode);
+       }
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+               /* NOTE(review): encoder power-state commands are compiled out
+                * with if (0); presumably unsupported/unneeded on this
+                * hardware — confirm before enabling.
+                */
+               if (0)
+                       psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+               if (mode == DRM_MODE_DPMS_OFF) {
+                       temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+                       if ((temp & SDVO_ENABLE) != 0) {
+                               psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+                       }
+               }
+       } else {
+               bool input1, input2;
+               int i;
+               u8 status;
+
+               temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+               if ((temp & SDVO_ENABLE) == 0)
+                       psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+               for (i = 0; i < 2; i++)
+                       psb_intel_wait_for_vblank(dev);
+
+               status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+               /* Warn if the device reported failure to sync.
+                * A lot of SDVO devices fail to notify of sync, but if the
+                * command status is a success we assume the link came up.
+                */
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+                       DRM_DEBUG_KMS("First %s output reported failure to "
+                                       "sync\n", SDVO_NAME(psb_intel_sdvo));
+               }
+
+               if (0)
+                       psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+               psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+       }
+       return;
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+
+       /* Doublescan modes are never supported on this encoder. */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* Clamp against the dot clock range the device reported. */
+       if (mode->clock < sdvo->pixel_clock_min)
+               return MODE_CLOCK_LOW;
+       if (mode->clock > sdvo->pixel_clock_max)
+               return MODE_CLOCK_HIGH;
+
+       /* An LVDS panel cannot show modes larger than its fixed mode. */
+       if (sdvo->is_lvds &&
+           (mode->hdisplay > sdvo->sdvo_lvds_fixed_mode->hdisplay ||
+            mode->vdisplay > sdvo->sdvo_lvds_fixed_mode->vdisplay))
+               return MODE_PANEL;
+
+       return MODE_OK;
+}
+
+/* Read the device capability block (fixed 8 bytes on the wire) into @caps
+ * and log every field.  Returns false when the GET_DEVICE_CAPS command
+ * fails; @caps is only valid on success.
+ */
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+       BUILD_BUG_ON(sizeof(*caps) != 8);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_DEVICE_CAPS,
+                                 caps, sizeof(*caps)))
+               return false;
+
+       DRM_DEBUG_KMS("SDVO capabilities:\n"
+                     "  vendor_id: %d\n"
+                     "  device_id: %d\n"
+                     "  device_rev_id: %d\n"
+                     "  sdvo_version_major: %d\n"
+                     "  sdvo_version_minor: %d\n"
+                     "  sdvo_inputs_mask: %d\n"
+                     "  smooth_scaling: %d\n"
+                     "  sharp_scaling: %d\n"
+                     "  up_scaling: %d\n"
+                     "  down_scaling: %d\n"
+                     "  stall_support: %d\n"
+                     "  output_flags: %d\n",
+                     caps->vendor_id,
+                     caps->device_id,
+                     caps->device_rev_id,
+                     caps->sdvo_version_major,
+                     caps->sdvo_version_minor,
+                     caps->sdvo_inputs_mask,
+                     caps->smooth_scaling,
+                     caps->sharp_scaling,
+                     caps->up_scaling,
+                     caps->down_scaling,
+                     caps->stall_support,
+                     caps->output_flags);
+
+       return true;
+}
+
+/* Unused helpers, compiled out and kept for reference only: a connector
+ * lookup by SDVOB/SDVOC register and the hot-plug query/arm routines.
+ */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+       struct drm_connector *connector = NULL;
+       struct psb_intel_sdvo *iout = NULL;
+       struct psb_intel_sdvo *sdvo;
+
+       /* find the sdvo connector */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               iout = to_psb_intel_sdvo(connector);
+
+               if (iout->type != INTEL_OUTPUT_SDVO)
+                       continue;
+
+               sdvo = iout->dev_priv;
+
+               if (sdvo->sdvo_reg == SDVOB && sdvoB)
+                       return connector;
+
+               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+                       return connector;
+
+       }
+
+       return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+       u8 response[2];
+       u8 status;  /* NOTE(review): set nowhere and unused in this copy */
+       struct psb_intel_sdvo *psb_intel_sdvo;
+       DRM_DEBUG_KMS("\n");
+
+       if (!connector)
+               return 0;
+
+       psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+                                   &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+       u8 response[2];
+       u8 status;  /* NOTE(review): assigned below but never checked */
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+       if (on) {
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+               status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       } else {
+               response[0] = 0;
+               response[1] = 0;
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       }
+
+       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+/* Report whether the encoder drives more than one type of output.
+ *
+ * Bug fix: the previous test "caps & -caps" isolates the LOWEST set bit and
+ * is therefore true whenever ANY output bit is set — contradicting the
+ * "more than one" intent (the i915 equivalent uses hweight16(...) > 1).
+ * Clearing the lowest set bit instead leaves a non-zero value exactly when
+ * two or more output bits are set.
+ */
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       /* Is there more than one type of output? */
+       /* NOTE(review): only the low four output flags are considered,
+        * mirroring the original 0xf mask — confirm this is intended.
+        */
+       int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+       return (caps & (caps - 1)) != 0;
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+       /* Probe the EDID over the encoder's own DDC channel. */
+       struct psb_intel_sdvo *encoder = intel_attached_sdvo(connector);
+
+       return drm_get_edid(connector, &encoder->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector.
+ *
+ * Probes the EDID over the CRT DDC pin instead of the SDVO channel.
+ * (The trailing "return NULL;" in the original was unreachable dead code
+ * and has been removed; behavior is unchanged.)
+ */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+       return drm_get_edid(connector,
+                           &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+/* Detect a digital (TMDS/HDMI) sink by probing the EDID over several DDC
+ * paths: the encoder's own bus first; for multifunction encoders, the
+ * remaining DDC bus bits (skipping bus 1, which carries the SDVO SPD ROM);
+ * and finally the analog CRT DDC pin.  A digital EDID means "connected" and
+ * also refreshes the HDMI monitor/audio flags; an analog EDID means
+ * "disconnected"; no EDID at all yields "unknown".
+ */
+enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       enum drm_connector_status status;
+       struct edid *edid;
+
+       edid = psb_intel_sdvo_get_edid(connector);
+
+       if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+               u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+               /*
+                * Don't use the 1 as the argument of DDC bus switch to get
+                * the EDID. It is used for SDVO SPD ROM.
+                */
+               for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+                       psb_intel_sdvo->ddc_bus = ddc;
+                       edid = psb_intel_sdvo_get_edid(connector);
+                       if (edid)
+                               break;
+               }
+               /*
+                * If we found the EDID on the other bus,
+                * assume that is the correct DDC bus.
+                */
+               if (edid == NULL)
+                       psb_intel_sdvo->ddc_bus = saved_ddc;
+       }
+
+       /*
+        * When there is no edid and no monitor is connected with VGA
+        * port, try to use the CRT ddc to read the EDID for DVI-connector.
+        */
+       if (edid == NULL)
+               edid = psb_intel_sdvo_get_analog_edid(connector);
+
+       status = connector_status_unknown;
+       if (edid != NULL) {
+               /* DDC bus is shared, match EDID to connector type */
+               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+                       status = connector_status_connected;
+                       if (psb_intel_sdvo->is_hdmi) {
+                               psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+                               psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+                       }
+               } else
+                       status = connector_status_disconnected;
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+
+       /* A user-forced audio setting on the connector overrides whatever
+        * the EDID reported.
+        */
+       if (status == connector_status_connected) {
+               struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+               if (psb_intel_sdvo_connector->force_audio)
+                       psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+       }
+
+       return status;
+}
+
+/* Connector detect hook.  Issues GET_ATTACHED_DISPLAYS (with a 30ms delay
+ * when a TV output may need time to settle), matches the 16-bit reply
+ * against this connector's output flag, and for TMDS connectors defers to
+ * psb_intel_sdvo_hdmi_sink_detect() above; other connectors cross-check a
+ * probed EDID (a digital EDID on a non-TMDS connector reads as
+ * disconnected).  On a positive detect, the encoder's is_tv/is_lvds/
+ * needs_tv_clock flags are refreshed from the response.
+ */
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+       uint16_t response;
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+       enum drm_connector_status ret;
+
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+                                 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+               return connector_status_unknown;
+
+       /* add 30ms delay when the output type might be TV */
+       if (psb_intel_sdvo->caps.output_flags &
+           (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+               mdelay(30);
+
+       if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+               return connector_status_unknown;
+
+       DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+                     response & 0xff, response >> 8,
+                     psb_intel_sdvo_connector->output_flag);
+
+       if (response == 0)
+               return connector_status_disconnected;
+
+       psb_intel_sdvo->attached_output = response;
+
+       /* Reset the HDMI flags; sink detection below may set them again. */
+       psb_intel_sdvo->has_hdmi_monitor = false;
+       psb_intel_sdvo->has_hdmi_audio = false;
+
+       if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+               ret = connector_status_disconnected;
+       else if (IS_TMDS(psb_intel_sdvo_connector))
+               ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+       else {
+               struct edid *edid;
+
+               /* if we have an edid check it matches the connection */
+               edid = psb_intel_sdvo_get_edid(connector);
+               if (edid == NULL)
+                       edid = psb_intel_sdvo_get_analog_edid(connector);
+               if (edid != NULL) {
+                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
+                               ret = connector_status_disconnected;
+                       else
+                               ret = connector_status_connected;
+                       connector->display_info.raw_edid = NULL;
+                       kfree(edid);
+               } else
+                       ret = connector_status_connected;
+       }
+
+       /* May update encoder flag for like clock for SDVO TV, etc.*/
+       if (ret == connector_status_connected) {
+               psb_intel_sdvo->is_tv = false;
+               psb_intel_sdvo->is_lvds = false;
+               psb_intel_sdvo->base.needs_tv_clock = false;
+
+               if (response & SDVO_TV_MASK) {
+                       psb_intel_sdvo->is_tv = true;
+                       psb_intel_sdvo->base.needs_tv_clock = true;
+               }
+               if (response & SDVO_LVDS_MASK)
+                       psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+       }
+
+       return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+       struct edid *edid;
+
+       /* Switch the DDC bus and fetch the EDID from the SDVO channel. */
+       edid = psb_intel_sdvo_get_edid(connector);
+
+       /*
+        * Mac mini hack.  On this device the DVI-I connector shares a single
+        * DDC link between the analog and digital outputs, so when the
+        * regular SDVO DDC probe fails, fall back to the analog DDC pin and
+        * look for the digital EDID data there.
+        */
+       if (edid == NULL)
+               edid = psb_intel_sdvo_get_analog_edid(connector);
+
+       if (edid != NULL) {
+               struct psb_intel_sdvo_connector *sdvo_connector =
+                       to_psb_intel_sdvo_connector(connector);
+               bool edid_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+               bool connector_is_digital = !!IS_TMDS(sdvo_connector);
+
+               /* Only accept the EDID when it matches the connector type —
+                * the shared bus may have answered for the other output.
+                */
+               if (connector_is_digital == edid_is_digital) {
+                       drm_mode_connector_update_edid_property(connector, edid);
+                       drm_add_edid_modes(connector, edid);
+               }
+
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+}
+
+/*
+ * Set of SDVO TV modes the device can report as supported.
+ * Note!  The order matters: entry i corresponds to bit i of the
+ * SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT reply (see the loop in
+ * psb_intel_sdvo_get_tv_modes), so do not reorder or insert entries.
+ * XXX: all entries appear to assume 60Hz refresh — confirm.
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+       { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+                  416, 0, 200, 201, 232, 233, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+                  416, 0, 240, 241, 272, 273, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+                  496, 0, 300, 301, 332, 333, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+                  736, 0, 350, 351, 382, 383, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+                  736, 0, 400, 401, 432, 433, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+                  736, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+                  800, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+                  800, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+                  816, 0, 350, 351, 382, 383, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+                  816, 0, 400, 401, 432, 433, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+                  816, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+                  816, 0, 540, 541, 572, 573, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+                  816, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+                  864, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+                  896, 0, 600, 601, 632, 633, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+                  928, 0, 624, 625, 656, 657, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+                  1016, 0, 766, 767, 798, 799, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+                  1120, 0, 768, 769, 800, 801, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+                  1376, 0, 1024, 1025, 1056, 1057, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+       uint32_t reply = 0, format_map = 0;
+       int i;
+
+       /* Ask the device which input resolutions it supports for the
+        * currently selected TV format (one-hot encoded request).
+        */
+       format_map = 1 << sdvo->tv_format_index;
+       memcpy(&tv_res, &format_map,
+              min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+       if (!psb_intel_sdvo_set_target_output(sdvo, sdvo->attached_output))
+               return;
+
+       /* The resolution-support request is exactly three bytes. */
+       BUILD_BUG_ON(sizeof(tv_res) != 3);
+       if (!psb_intel_sdvo_write_cmd(sdvo,
+                                 SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+                                 &tv_res, sizeof(tv_res)))
+               return;
+       if (!psb_intel_sdvo_read_response(sdvo, &reply, 3))
+               return;
+
+       /* Reply bit i corresponds to sdvo_tv_modes[i]; add a duplicate of
+        * every supported mode to the connector's probed list.
+        */
+       for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
+               struct drm_display_mode *nmode;
+
+               if (!(reply & (1 << i)))
+                       continue;
+
+               nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]);
+               if (nmode)
+                       drm_mode_probed_add(connector, nmode);
+       }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       struct drm_display_mode *mode;
+
+       /*
+        * Attempt to get the mode list from DDC first; the preferred modes
+        * are assumed to be arranged in priority order.
+        */
+       psb_intel_ddc_get_modes(connector, sdvo->i2c);
+
+       /* Fall back to the VBT-provided mode only when DDC found nothing. */
+       if (list_empty(&connector->probed_modes) &&
+           dev_priv->sdvo_lvds_vbt_mode != NULL) {
+               mode = drm_mode_duplicate(connector->dev,
+                                         dev_priv->sdvo_lvds_vbt_mode);
+               if (mode != NULL) {
+                       /* Guarantee the mode is preferred */
+                       mode->type = (DRM_MODE_TYPE_PREFERRED |
+                                     DRM_MODE_TYPE_DRIVER);
+                       drm_mode_probed_add(connector, mode);
+               }
+       }
+
+       /* Remember the first preferred mode as the panel's fixed mode. */
+       list_for_each_entry(mode, &connector->probed_modes, head) {
+               if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+                       sdvo->sdvo_lvds_fixed_mode =
+                               drm_mode_duplicate(connector->dev, mode);
+
+                       drm_mode_set_crtcinfo(sdvo->sdvo_lvds_fixed_mode, 0);
+
+                       sdvo->is_lvds = true;
+                       break;
+               }
+       }
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *sdvo_connector =
+               to_psb_intel_sdvo_connector(connector);
+
+       /* Dispatch on connector type: TV and LVDS have dedicated probes,
+        * everything else is queried over DDC.
+        */
+       if (IS_TV(sdvo_connector))
+               psb_intel_sdvo_get_tv_modes(connector);
+       else if (IS_LVDS(sdvo_connector))
+               psb_intel_sdvo_get_lvds_modes(connector);
+       else
+               psb_intel_sdvo_get_ddc_modes(connector);
+
+       /* Report whether any mode made it onto the probed list. */
+       return !list_empty(&connector->probed_modes);
+}
+
+/*
+ * Destroy every optional "enhancement" DRM property that may have been
+ * created for this SDVO connector.  Properties that were never created
+ * are NULL and are skipped.
+ */
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *sdvo_connector =
+               to_psb_intel_sdvo_connector(connector);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *props[] = {
+               sdvo_connector->left,
+               sdvo_connector->right,
+               sdvo_connector->top,
+               sdvo_connector->bottom,
+               sdvo_connector->hpos,
+               sdvo_connector->vpos,
+               sdvo_connector->saturation,
+               sdvo_connector->contrast,
+               sdvo_connector->hue,
+               sdvo_connector->sharpness,
+               sdvo_connector->flicker_filter,
+               sdvo_connector->flicker_filter_2d,
+               sdvo_connector->flicker_filter_adaptive,
+               sdvo_connector->tv_luma_filter,
+               sdvo_connector->tv_chroma_filter,
+               sdvo_connector->dot_crawl,
+               sdvo_connector->brightness,
+       };
+       unsigned int i;
+
+       for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
+               if (props[i])
+                       drm_property_destroy(dev, props[i]);
+       }
+}
+
+/*
+ * drm connector .destroy hook: release everything owned by an SDVO
+ * connector - its tv_format and enhancement properties, the sysfs
+ * entry, the core connector state, and finally the structure itself
+ * (the connector is embedded in a kzalloc'd psb_intel_sdvo_connector).
+ */
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+       if (psb_intel_sdvo_connector->tv_format)
+               drm_property_destroy(connector->dev,
+                                    psb_intel_sdvo_connector->tv_format);
+
+       psb_intel_sdvo_destroy_enhance_property(connector);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+/*
+ * Probe the attached monitor's EDID to decide whether HDMI audio should
+ * be enabled.  Returns false for non-HDMI outputs or when no digital
+ * EDID is available.
+ *
+ * NOTE(review): the edid returned by psb_intel_sdvo_get_edid() is never
+ * freed here - if that helper returns a freshly allocated EDID block
+ * (as drm_get_edid() does), this leaks it.  Verify ownership against
+ * psb_intel_sdvo_get_edid() before changing.
+ */
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct edid *edid;
+       bool has_audio = false;
+
+       if (!psb_intel_sdvo->is_hdmi)
+               return false;
+
+       edid = psb_intel_sdvo_get_edid(connector);
+       /* '&' binds tighter than '&&': tests the DIGITAL input bit */
+       if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+               has_audio = drm_detect_monitor_audio(edid);
+
+       return has_audio;
+}
+
+/*
+ * drm connector .set_property hook for SDVO outputs.
+ *
+ * Handles three families of properties:
+ *   - force_audio / broadcast_rgb (driver-global properties),
+ *   - the TV format enum,
+ *   - TV/LVDS picture enhancements (margins, hue, contrast, ...),
+ *     which are pushed to the SDVO device via set_value/done below.
+ *
+ * Returns 0 on success, -EINVAL for unknown properties or out-of-range
+ * values, -EIO when the SDVO device rejects the command.  Any accepted
+ * change ends at 'done', which re-sets the current CRTC mode so the
+ * new value takes effect immediately.
+ */
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+                       struct drm_property *property,
+                       uint64_t val)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       uint16_t temp_value;
+       uint8_t cmd;
+       int ret;
+
+       /* Record the new value in the DRM core first */
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret)
+               return ret;
+
+       /* force_audio: 0 = auto-detect from EDID, >0 = on, <0 = off */
+       if (property == dev_priv->force_audio_property) {
+               int i = val;
+               bool has_audio;
+
+               if (i == psb_intel_sdvo_connector->force_audio)
+                       return 0;
+
+               psb_intel_sdvo_connector->force_audio = i;
+
+               if (i == 0)
+                       has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+               else
+                       has_audio = i > 0;
+
+               if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+                       return 0;
+
+               psb_intel_sdvo->has_hdmi_audio = has_audio;
+               goto done;
+       }
+
+       if (property == dev_priv->broadcast_rgb_property) {
+               if (val == !!psb_intel_sdvo->color_range)
+                       return 0;
+
+               psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+               goto done;
+       }
+
+/*
+ * Match an enhancement property; validate against the cached max,
+ * update the cached current value, and jump to set_value to push
+ * the new value to the hardware.  Relies on temp_value having been
+ * assigned in the IS_TV_OR_LVDS branch below.
+ */
+#define CHECK_PROPERTY(name, NAME) \
+       if (psb_intel_sdvo_connector->name == property) { \
+               if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+               if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+               cmd = SDVO_CMD_SET_##NAME; \
+               psb_intel_sdvo_connector->cur_##name = temp_value; \
+               goto set_value; \
+       }
+
+       if (property == psb_intel_sdvo_connector->tv_format) {
+               if (val >= TV_FORMAT_NUM)
+                       return -EINVAL;
+
+               if (psb_intel_sdvo->tv_format_index ==
+                   psb_intel_sdvo_connector->tv_format_supported[val])
+                       return 0;
+
+               psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+               goto done;
+       } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+               temp_value = val;
+               /*
+                * Margins are kept symmetric: setting 'left' mirrors the
+                * value onto 'right' (and top onto bottom), then the
+                * overscan value sent to hardware is max - margin.
+                */
+               if (psb_intel_sdvo_connector->left == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->right, val);
+                       if (psb_intel_sdvo_connector->left_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->left_margin = temp_value;
+                       psb_intel_sdvo_connector->right_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_hscan -
+                               psb_intel_sdvo_connector->left_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_H;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->right == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->left, val);
+                       if (psb_intel_sdvo_connector->right_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->left_margin = temp_value;
+                       psb_intel_sdvo_connector->right_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_hscan -
+                               psb_intel_sdvo_connector->left_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_H;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->top == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->bottom, val);
+                       if (psb_intel_sdvo_connector->top_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->top_margin = temp_value;
+                       psb_intel_sdvo_connector->bottom_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_vscan -
+                               psb_intel_sdvo_connector->top_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_V;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->bottom == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->top, val);
+                       if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->top_margin = temp_value;
+                       psb_intel_sdvo_connector->bottom_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_vscan -
+                               psb_intel_sdvo_connector->top_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_V;
+                       goto set_value;
+               }
+               CHECK_PROPERTY(hpos, HPOS)
+               CHECK_PROPERTY(vpos, VPOS)
+               CHECK_PROPERTY(saturation, SATURATION)
+               CHECK_PROPERTY(contrast, CONTRAST)
+               CHECK_PROPERTY(hue, HUE)
+               CHECK_PROPERTY(brightness, BRIGHTNESS)
+               CHECK_PROPERTY(sharpness, SHARPNESS)
+               CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+               CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+               CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+               CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+               CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+               CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+       }
+
+       return -EINVAL; /* unknown property */
+
+set_value:
+       if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+               return -EIO;
+
+
+       /* deliberate fall-through into 'done' to apply the change */
+done:
+       if (psb_intel_sdvo->base.base.crtc) {
+               struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+               drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+                                        crtc->y, crtc->fb);
+       }
+
+       return 0;
+#undef CHECK_PROPERTY
+}
+
+/* Encoder helper callbacks for SDVO (modeset sequencing) */
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+       .dpms = psb_intel_sdvo_dpms,
+       .mode_fixup = psb_intel_sdvo_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = psb_intel_sdvo_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+/* Connector callbacks for SDVO (detection, properties, teardown) */
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = psb_intel_sdvo_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = psb_intel_sdvo_set_property,
+       .destroy = psb_intel_sdvo_destroy,
+};
+
+/* Connector helper callbacks for SDVO (mode probing and validation) */
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+       .get_modes = psb_intel_sdvo_get_modes,
+       .mode_valid = psb_intel_sdvo_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+/*
+ * Encoder .destroy hook: free the LVDS fixed mode (if one was latched
+ * by psb_intel_sdvo_get_lvds_modes), tear down the private DDC i2c
+ * adapter, then release the encoder itself.
+ */
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+       if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+               drm_mode_destroy(encoder->dev,
+                                psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+       i2c_del_adapter(&psb_intel_sdvo->ddc);
+       psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+       .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+/*
+ * Pick a DDC bus when the BIOS did not provide an SDVO mapping.
+ * Currently hard-wired to bus 2 (see FIXME); the #if 0 block below
+ * preserves the intended priority-based algorithm for reference.
+ */
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+       /* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+        * We need to figure out if this is true for all available poulsbo
+        * hardware, or if we need to fiddle with the guessing code above.
+        * The problem might go away if we can parse sdvo mappings from bios */
+       sdvo->ddc_bus = 2;
+
+#if 0
+       uint16_t mask = 0;
+       unsigned int num_bits;
+
+       /* Make a mask of outputs less than or equal to our own priority in the
+        * list.
+        */
+       /* Deliberate fall-through on every case: each label ORs in its
+        * own bit plus the bits of all lower-priority outputs below it. */
+       switch (sdvo->controlled_output) {
+       case SDVO_OUTPUT_LVDS1:
+               mask |= SDVO_OUTPUT_LVDS1;
+       case SDVO_OUTPUT_LVDS0:
+               mask |= SDVO_OUTPUT_LVDS0;
+       case SDVO_OUTPUT_TMDS1:
+               mask |= SDVO_OUTPUT_TMDS1;
+       case SDVO_OUTPUT_TMDS0:
+               mask |= SDVO_OUTPUT_TMDS0;
+       case SDVO_OUTPUT_RGB1:
+               mask |= SDVO_OUTPUT_RGB1;
+       case SDVO_OUTPUT_RGB0:
+               mask |= SDVO_OUTPUT_RGB0;
+               break;
+       }
+
+       /* Count bits to find what number we are in the priority list. */
+       mask &= sdvo->caps.output_flags;
+       num_bits = hweight16(mask);
+       /* If more than 3 outputs, default to DDC bus 3 for now. */
+       if (num_bits > 3)
+               num_bits = 3;
+
+       /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+       sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+                         struct psb_intel_sdvo *sdvo, u32 reg)
+{
+       struct sdvo_device_mapping *mapping;
+
+       /* mapping[0] is SDVOB, mapping[1] is SDVOC */
+       if (IS_SDVOB(reg))
+               mapping = &(dev_priv->sdvo_mappings[0]);
+       else
+               mapping = &(dev_priv->sdvo_mappings[1]);
+
+       /* High nibble of the BIOS ddc_pin encodes the DDC bus number */
+       if (mapping->initialized)
+               sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       else
+               psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+/*
+ * Select the GMBUS i2c adapter (and speed) used to talk to the SDVO
+ * device on @reg.  Defaults to port DPB at 1 MHz; the BIOS mapping,
+ * when initialized, overrides both pin and speed.
+ */
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+                         struct psb_intel_sdvo *sdvo, u32 reg)
+{
+       struct sdvo_device_mapping *mapping;
+       u8 pin, speed;
+
+       if (IS_SDVOB(reg))
+               mapping = &dev_priv->sdvo_mappings[0];
+       else
+               mapping = &dev_priv->sdvo_mappings[1];
+
+       /* Defaults used when the BIOS provided no mapping */
+       pin = GMBUS_PORT_DPB;
+       speed = GMBUS_RATE_1MHZ >> 8;
+       if (mapping->initialized) {
+               pin = mapping->i2c_pin;
+               speed = mapping->i2c_speed;
+       }
+
+       /* Out-of-range BIOS pins fall back to the DPB adapter */
+       if (pin < GMBUS_NUM_PORTS) {
+               sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+               gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+               gma_intel_gmbus_force_bit(sdvo->i2c, true);
+       } else
+               sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+/*
+ * Whether this SDVO connector can carry HDMI, i.e. the device supports
+ * the HDMI encode mode.  @device (TMDS0/TMDS1 index) is currently
+ * unused - the capability check is per-device, not per-output.
+ */
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+/*
+ * Work out the i2c slave address of the SDVO device behind @sdvo_reg.
+ * Prefer the BIOS-provided mapping for this port; failing that, use
+ * whichever of the two conventional addresses (0x70/0x72) the other
+ * port is not using; failing that, fall back to the pre-BIOS-parsing
+ * convention (SDVOB = 0x70, SDVOC = 0x72).
+ */
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct sdvo_device_mapping *this_mapping, *that_mapping;
+       int is_sdvob = IS_SDVOB(sdvo_reg) ? 1 : 0;
+
+       this_mapping = &dev_priv->sdvo_mappings[is_sdvob ? 0 : 1];
+       that_mapping = &dev_priv->sdvo_mappings[is_sdvob ? 1 : 0];
+
+       /* If the BIOS described our SDVO device, take advantage of it. */
+       if (this_mapping->slave_addr)
+               return this_mapping->slave_addr;
+
+       /*
+        * If the BIOS only described a different SDVO device, use the
+        * address that it isn't using.
+        */
+       if (that_mapping->slave_addr)
+               return that_mapping->slave_addr == 0x70 ? 0x72 : 0x70;
+
+       /*
+        * No SDVO device info is found for another DVO port, so use
+        * the mapping assumption we had before BIOS parsing.
+        */
+       return is_sdvob ? 0x70 : 0x72;
+}
+
+/*
+ * Common initialization shared by all SDVO connector flavours:
+ * register the DRM connector, attach the helper vtable, set display
+ * constraints, bind the connector to its encoder, and expose it in
+ * sysfs.  Callers set connector_type/encoder_type before calling.
+ *
+ * NOTE(review): the return values of drm_connector_init() and
+ * drm_sysfs_connector_add() are ignored here - confirm they cannot
+ * fail in this configuration.
+ */
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+                         struct psb_intel_sdvo *encoder)
+{
+       drm_connector_init(encoder->base.base.dev,
+                          &connector->base.base,
+                          &psb_intel_sdvo_connector_funcs,
+                          connector->base.base.connector_type);
+
+       drm_connector_helper_add(&connector->base.base,
+                                &psb_intel_sdvo_connector_helper_funcs);
+
+       connector->base.base.interlace_allowed = 0;
+       connector->base.base.doublescan_allowed = 0;
+       connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+       psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+       drm_sysfs_connector_add(&connector->base.base);
+}
+
+/*
+ * Intentionally a no-op: HDMI audio/broadcast-RGB properties are not
+ * supported yet.  The commented-out body shows the intended i915-style
+ * implementation for when support lands.
+ */
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+       /* FIXME: We don't support HDMI at the moment
+       struct drm_device *dev = connector->base.base.dev;
+
+       intel_attach_force_audio_property(&connector->base.base);
+       if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+               intel_attach_broadcast_rgb_property(&connector->base.base);
+       */
+}
+
+/*
+ * Create a DVI/HDMI connector for TMDS output @device (0 or 1).
+ * Upgrades the connector type to HDMI-A when the SDVO device supports
+ * HDMI encode.  Returns false only on allocation failure.
+ */
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       if (device == 0) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+       } else if (device == 1) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+       }
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       /* Hotplug polling deliberately left disabled for now: */
+       // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+       if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+               connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+               psb_intel_sdvo->is_hdmi = true;
+       }
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                      (1 << INTEL_ANALOG_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+       if (psb_intel_sdvo->is_hdmi)
+               psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+       return true;
+}
+
+/*
+ * Create an S-Video/composite TV connector for SDVO output @type
+ * (e.g. SDVO_OUTPUT_SVID0).  On property-creation failure the whole
+ * connector is torn down via psb_intel_sdvo_destroy(), which also
+ * frees the allocation.
+ */
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+       psb_intel_sdvo->controlled_output |= type;
+       psb_intel_sdvo_connector->output_flag = type;
+
+       psb_intel_sdvo->is_tv = true;
+       psb_intel_sdvo->base.needs_tv_clock = true;
+       psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+       if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+               goto err;
+
+       if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+               goto err;
+
+       return true;
+
+err:
+       /* Frees psb_intel_sdvo_connector (connector is embedded in it) */
+       psb_intel_sdvo_destroy(connector);
+       return false;
+}
+
+/*
+ * Create a VGA connector for analog RGB output @device (0 or 1).
+ * Returns false only on allocation failure.
+ */
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct psb_intel_sdvo_connector *sdvo_connector;
+       struct psb_intel_connector *intel_connector;
+       struct drm_connector *connector;
+
+       sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (sdvo_connector == NULL)
+               return false;
+
+       intel_connector = &sdvo_connector->base;
+       connector = &intel_connector->base;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+       switch (device) {
+       case 0:
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+               sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+               break;
+       case 1:
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+               sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+               break;
+       }
+
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                      (1 << INTEL_ANALOG_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
+       return true;
+}
+
+/*
+ * Create an LVDS connector for SDVO LVDS output @device (0 or 1).
+ * On enhancement-property failure the connector is torn down via
+ * psb_intel_sdvo_destroy(), which also frees the allocation.
+ */
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+       if (device == 0) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+       } else if (device == 1) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+       }
+
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+                                      (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+       if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+               goto err;
+
+       return true;
+
+err:
+       /* Frees psb_intel_sdvo_connector (connector is embedded in it) */
+       psb_intel_sdvo_destroy(connector);
+       return false;
+}
+
+/*
+ * Instantiate one connector per output advertised in the device's
+ * capability @flags.  Returns false if any connector fails to set up
+ * or if no supported output bit is set at all.
+ */
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+       psb_intel_sdvo->is_tv = false;
+       psb_intel_sdvo->base.needs_tv_clock = false;
+       psb_intel_sdvo->is_lvds = false;
+
+       /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+
+       if (flags & SDVO_OUTPUT_TMDS0)
+               if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+                       return false;
+
+       /* XXX1 is only created when both XXX0 and XXX1 bits are set */
+       if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+               if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+                       return false;
+
+       /* TV has no XXX1 function block */
+       if (flags & SDVO_OUTPUT_SVID0)
+               if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_CVBS0)
+               if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_RGB0)
+               if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+                       return false;
+
+       if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+               if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_LVDS0)
+               if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+                       return false;
+
+       if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+               if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+                       return false;
+
+       if ((flags & SDVO_OUTPUT_MASK) == 0) {
+               unsigned char bytes[2];
+
+               psb_intel_sdvo->controlled_output = 0;
+               memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+               DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+                             SDVO_NAME(psb_intel_sdvo),
+                             bytes[0], bytes[1]);
+               return false;
+       }
+       /* SDVO outputs can be cloned onto either pipe */
+       psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+       return true;
+}
+
+/*
+ * Query the SDVO device for its supported TV formats and expose them
+ * as a "mode" enum property on the connector.  The first supported
+ * format becomes the initial tv_format_index.  Returns false on any
+ * query, empty-bitmask, or property-creation failure.
+ */
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                                         struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                         int type)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct psb_intel_sdvo_tv_format format;
+       uint32_t format_map, i;
+
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+               return false;
+
+       BUILD_BUG_ON(sizeof(format) != 6);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+                                 &format, sizeof(format)))
+               return false;
+
+       /* Only the first 32 bits of the 6-byte format bitmask are used */
+       memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+       if (format_map == 0)
+               return false;
+
+       /* Build a dense index -> format-id table from the sparse bitmask */
+       psb_intel_sdvo_connector->format_supported_num = 0;
+       for (i = 0 ; i < TV_FORMAT_NUM; i++)
+               if (format_map & (1 << i))
+                       psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+
+       psb_intel_sdvo_connector->tv_format =
+                       drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                           "mode", psb_intel_sdvo_connector->format_supported_num);
+       if (!psb_intel_sdvo_connector->tv_format)
+               return false;
+
+       for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+               drm_property_add_enum(
+                               psb_intel_sdvo_connector->tv_format, i,
+                               i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+       psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+       drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+                                     psb_intel_sdvo_connector->tv_format, 0);
+       return true;
+
+}
+
+/*
+ * Boilerplate for one optional picture-enhancement control: if the
+ * device advertises 'name', read its max and current values, create a
+ * 0..max range property, cache both values on the connector, and
+ * attach the property.  Returns false from the enclosing function on
+ * any device-query or property-creation failure.
+ */
+#define ENHANCEMENT(name, NAME) do { \
+       if (enhancements.name) { \
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+                   !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+                       return false; \
+               psb_intel_sdvo_connector->max_##name = data_value[0]; \
+               psb_intel_sdvo_connector->cur_##name = response; \
+               psb_intel_sdvo_connector->name = \
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+               if (!psb_intel_sdvo_connector->name) return false; \
+               psb_intel_sdvo_connector->name->values[0] = 0; \
+               psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
+               drm_connector_attach_property(connector, \
+                                             psb_intel_sdvo_connector->name, \
+                                             psb_intel_sdvo_connector->cur_##name); \
+               DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+                             data_value[0], data_value[1], response); \
+       } \
+} while(0)
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+                                     struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                     struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+       uint16_t response, data_value[2];
+
+       /* when horizontal overscan is supported, Add the left/right  property */
+       if (enhancements.overscan_h) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_MAX_OVERSCAN_H,
+                                         &data_value, 4))
+                       return false;
+
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_OVERSCAN_H,
+                                         &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_hscan = data_value[0];
+               psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+               psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+               psb_intel_sdvo_connector->left =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "left_margin", 2);
+               if (!psb_intel_sdvo_connector->left)
+                       return false;
+
+               psb_intel_sdvo_connector->left->values[0] = 0;
+               psb_intel_sdvo_connector->left->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->left,
+                                             psb_intel_sdvo_connector->left_margin);
+
+               psb_intel_sdvo_connector->right =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "right_margin", 2);
+               if (!psb_intel_sdvo_connector->right)
+                       return false;
+
+               psb_intel_sdvo_connector->right->values[0] = 0;
+               psb_intel_sdvo_connector->right->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->right,
+                                             psb_intel_sdvo_connector->right_margin);
+               DRM_DEBUG_KMS("h_overscan: max %d, "
+                             "default %d, current %d\n",
+                             data_value[0], data_value[1], response);
+       }
+
+       if (enhancements.overscan_v) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_MAX_OVERSCAN_V,
+                                         &data_value, 4))
+                       return false;
+
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_OVERSCAN_V,
+                                         &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_vscan = data_value[0];
+               psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+               psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+               psb_intel_sdvo_connector->top =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "top_margin", 2);
+               if (!psb_intel_sdvo_connector->top)
+                       return false;
+
+               psb_intel_sdvo_connector->top->values[0] = 0;
+               psb_intel_sdvo_connector->top->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->top,
+                                             psb_intel_sdvo_connector->top_margin);
+
+               psb_intel_sdvo_connector->bottom =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "bottom_margin", 2);
+               if (!psb_intel_sdvo_connector->bottom)
+                       return false;
+
+               psb_intel_sdvo_connector->bottom->values[0] = 0;
+               psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->bottom,
+                                             psb_intel_sdvo_connector->bottom_margin);
+               DRM_DEBUG_KMS("v_overscan: max %d, "
+                             "default %d, current %d\n",
+                             data_value[0], data_value[1], response);
+       }
+
+       ENHANCEMENT(hpos, HPOS);
+       ENHANCEMENT(vpos, VPOS);
+       ENHANCEMENT(saturation, SATURATION);
+       ENHANCEMENT(contrast, CONTRAST);
+       ENHANCEMENT(hue, HUE);
+       ENHANCEMENT(sharpness, SHARPNESS);
+       ENHANCEMENT(brightness, BRIGHTNESS);
+       ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+       ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+       ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+       ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+       ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+       if (enhancements.dot_crawl) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_dot_crawl = 1;
+               psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+               psb_intel_sdvo_connector->dot_crawl =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+               if (!psb_intel_sdvo_connector->dot_crawl)
+                       return false;
+
+               psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
+               psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->dot_crawl,
+                                             psb_intel_sdvo_connector->cur_dot_crawl);
+               DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+       }
+
+       return true;
+}
+
+/*
+ * Create the picture-enhancement DRM properties supported by an LVDS
+ * panel behind the SDVO device.  Only brightness is exposed here.
+ * Returns false if querying or creating the property fails.
+ */
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                       struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+       /* dev, connector, response and data_value look unused but are all
+        * referenced by the expansion of the ENHANCEMENT() macro defined
+        * earlier in this file (and #undef'd right after this function). */
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+       uint16_t response, data_value[2];
+
+       ENHANCEMENT(brightness, BRIGHTNESS);
+
+       return true;
+}
+#undef ENHANCEMENT
+
+/*
+ * Query the SDVO device for its supported picture enhancements and
+ * create the matching DRM connector properties.  TV and LVDS outputs
+ * have distinct enhancement sets; other output types get none.
+ * Returns true when nothing needs to be done or creation succeeded.
+ */
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                                              struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+       /* The 2-byte hardware reply is read either as a raw u16 (to test
+        * for "no enhancements") or as the per-feature bitfield struct. */
+       union {
+               struct psb_intel_sdvo_enhancements_reply reply;
+               uint16_t response;
+       } enhancements;
+
+       /* Guarantee the bitfield struct really occupies exactly 2 bytes,
+        * i.e. matches the wire format of the reply. */
+       BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+       enhancements.response = 0;
+       psb_intel_sdvo_get_value(psb_intel_sdvo,
+                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                            &enhancements, sizeof(enhancements));
+       if (enhancements.response == 0) {
+               DRM_DEBUG_KMS("No enhancement is supported\n");
+               return true;
+       }
+
+       if (IS_TV(psb_intel_sdvo_connector))
+               return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+       else if(IS_LVDS(psb_intel_sdvo_connector))
+               return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+       else
+               return true;
+}
+
+/*
+ * i2c master_xfer for the DDC proxy adapter: route the SDVO device's
+ * internal bus switch to the DDC bus, then forward the messages to the
+ * real i2c adapter.  Returns -EIO if the bus switch cannot be set.
+ */
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+                                    struct i2c_msg *msgs,
+                                    int num)
+{
+       struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+       if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+               return -EIO;
+
+       return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+/* Report the functionality flags of the underlying real i2c adapter. */
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+       struct psb_intel_sdvo *sdvo = adapter->algo_data;
+       return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+/* Algorithm table for the proxy adapter: every DDC transaction is
+ * tunnelled through the SDVO device's bus switch (see xfer above). */
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+       .master_xfer    = psb_intel_sdvo_ddc_proxy_xfer,
+       .functionality  = psb_intel_sdvo_ddc_proxy_func
+};
+
+/*
+ * Register a proxy i2c adapter so EDID/DDC reads go through the SDVO
+ * device's internal bus switch.  Returns true if the adapter was added.
+ */
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+                         struct drm_device *dev)
+{
+       sdvo->ddc.owner = THIS_MODULE;
+       sdvo->ddc.class = I2C_CLASS_DDC;
+       snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+       sdvo->ddc.dev.parent = &dev->pdev->dev;
+       /* algo_data carries the sdvo back-pointer used by the proxy ops */
+       sdvo->ddc.algo_data = sdvo;
+       sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+       return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+/*
+ * psb_intel_sdvo_init - probe and register an SDVO encoder
+ * @dev: drm device
+ * @sdvo_reg: SDVO control register, selects SDVOB or SDVOC
+ *
+ * Allocates encoder state, sets up the DDC proxy adapter, verifies the
+ * device answers on i2c, reads its capabilities and configures the
+ * detected outputs.  Returns true on success; on any failure after the
+ * encoder was initialized, the err path tears down the encoder, removes
+ * the proxy adapter and frees the allocation before returning false.
+ */
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_sdvo *psb_intel_sdvo;
+       int i;
+
+       psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+       if (!psb_intel_sdvo)
+               return false;
+
+       psb_intel_sdvo->sdvo_reg = sdvo_reg;
+       /* NOTE(review): assumes get_slave_addr returns the 8-bit address
+        * form; the >> 1 converts it to a 7-bit i2c address — confirm. */
+       psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+       psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+       if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+               kfree(psb_intel_sdvo);
+               return false;
+       }
+
+       /* encoder type will be decided later */
+       psb_intel_encoder = &psb_intel_sdvo->base;
+       psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+       drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+       /* Read the regs to test if we can talk to the device */
+       for (i = 0; i < 0x40; i++) {
+               u8 byte;
+
+               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+                       DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+                                     IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+                       goto err;
+               }
+       }
+
+       /* Advertise hotplug interrupt support for whichever port this is */
+       if (IS_SDVOB(sdvo_reg))
+               dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+       else
+               dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+       drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+       /* In default case sdvo lvds is false */
+       if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+               goto err;
+
+       if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
+                                   psb_intel_sdvo->caps.output_flags) != true) {
+               DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+                             IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+               goto err;
+       }
+
+       psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               goto err;
+
+       if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+                                                   &psb_intel_sdvo->pixel_clock_min,
+                                                   &psb_intel_sdvo->pixel_clock_max))
+               goto err;
+
+       DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+                       "clock range %dMHz - %dMHz, "
+                       "input 1: %c, input 2: %c, "
+                       "output 1: %c, output 2: %c\n",
+                       SDVO_NAME(psb_intel_sdvo),
+                       psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+                       psb_intel_sdvo->caps.device_rev_id,
+                       psb_intel_sdvo->pixel_clock_min / 1000,
+                       psb_intel_sdvo->pixel_clock_max / 1000,
+                       (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+                       (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+                       /* check currently supported outputs */
+                       psb_intel_sdvo->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+                       psb_intel_sdvo->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+       return true;
+
+err:
+       /* Unwind in reverse order of acquisition */
+       drm_encoder_cleanup(&psb_intel_encoder->base);
+       i2c_del_adapter(&psb_intel_sdvo->ddc);
+       kfree(psb_intel_sdvo);
+
+       return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644 (file)
index 0000000..600e797
--- /dev/null
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct psb_intel_sdvo_caps {
+    u8 vendor_id;
+    u8 device_id;
+    u8 device_rev_id;
+    u8 sdvo_version_major;
+    u8 sdvo_version_minor;
+    unsigned int sdvo_inputs_mask:2;
+    unsigned int smooth_scaling:1;
+    unsigned int sharp_scaling:1;
+    unsigned int up_scaling:1;
+    unsigned int down_scaling:1;
+    unsigned int stall_support:1;
+    unsigned int pad:1;
+    u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+    struct {
+       u16 clock;              /**< pixel clock, in 10kHz units */
+       u8 h_active;            /**< lower 8 bits (pixels) */
+       u8 h_blank;             /**< lower 8 bits (pixels) */
+       u8 h_high;              /**< upper 4 bits each h_active, h_blank */
+       u8 v_active;            /**< lower 8 bits (lines) */
+       u8 v_blank;             /**< lower 8 bits (lines) */
+       u8 v_high;              /**< upper 4 bits each v_active, v_blank */
+    } part1;
+
+    struct {
+       u8 h_sync_off;  /**< lower 8 bits, from hblank start */
+       u8 h_sync_width;        /**< lower 8 bits (pixels) */
+       /** lower 4 bits each vsync offset, vsync width */
+       u8 v_sync_off_width;
+       /**
+        * 2 high bits of hsync offset, 2 high bits of hsync width,
+        * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+        */
+       u8 sync_off_width_high;
+       u8 dtd_flags;
+       u8 sdvo_flags;
+       /** bits 6-7 of vsync offset at bits 6-7 */
+       u8 v_sync_off_high;
+       u8 reserved;
+    } part2;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_pixel_clock_range {
+    u16 min;                   /**< pixel clock, in 10kHz units */
+    u16 max;                   /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+    u16 clock;
+    u16 width;
+    u16 height;
+    u8 interlace:1;
+    u8 scaled:1;
+    u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0                         0x07
+#define SDVO_I2C_ARG_1                         0x06
+#define SDVO_I2C_ARG_2                         0x05
+#define SDVO_I2C_ARG_3                         0x04
+#define SDVO_I2C_ARG_4                         0x03
+#define SDVO_I2C_ARG_5                         0x02
+#define SDVO_I2C_ARG_6                         0x01
+#define SDVO_I2C_ARG_7                         0x00
+#define SDVO_I2C_OPCODE                                0x08
+#define SDVO_I2C_CMD_STATUS                    0x09
+#define SDVO_I2C_RETURN_0                      0x0a
+#define SDVO_I2C_RETURN_1                      0x0b
+#define SDVO_I2C_RETURN_2                      0x0c
+#define SDVO_I2C_RETURN_3                      0x0d
+#define SDVO_I2C_RETURN_4                      0x0e
+#define SDVO_I2C_RETURN_5                      0x0f
+#define SDVO_I2C_RETURN_6                      0x10
+#define SDVO_I2C_RETURN_7                      0x11
+#define SDVO_I2C_VENDOR_BEGIN                  0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON               0x0
+#define SDVO_CMD_STATUS_SUCCESS                        0x1
+#define SDVO_CMD_STATUS_NOTSUPP                        0x2
+#define SDVO_CMD_STATUS_INVALID_ARG            0x3
+#define SDVO_CMD_STATUS_PENDING                        0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED   0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP       0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET                                 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS                       0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV                      0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR                    SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR                    SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH                    SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS                    0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+    unsigned int input0_trained:1;
+    unsigned int input1_trained:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS                    0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP                                0x06
+struct psb_intel_sdvo_in_out_map {
+    u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP                                0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS                 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT                  0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG                   0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG                   0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE            0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+    u16 interrupt_status;
+    unsigned int ambient_light_interrupt:1;
+    unsigned int hdmi_audio_encrypt_change:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT                      0x10
+struct psb_intel_sdvo_set_target_input_args {
+    unsigned int target_1:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT                     0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1               0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2               0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1               0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2               0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1              0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2              0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1              0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2              0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW                            SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH                           SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE                             SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK                              SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH                               SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE                             SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK                              SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH                               SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF                            SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH                          SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH                      SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH                  SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS                            SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED                          (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK                         (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK                          (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK                           (3 << 1)
+# define SDVO_DTD_SDVO_FLAS                            SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL                              (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED                           (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT                         (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK                       (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE                       (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP                      (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH                     (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH                       SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING         0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW         SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH                SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW         SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH                SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW                SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH       SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS             SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED          (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED              (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1      0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2      0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE           0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE          0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS                0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT                   0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT                   0x21
+# define SDVO_CLOCK_RATE_MULT_1X                               (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X                               (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X                               (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS              0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT                         0x28
+
+#define SDVO_CMD_SET_TV_FORMAT                         0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT           0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+    unsigned int res_320x200:1;
+    unsigned int res_320x240:1;
+    unsigned int res_400x300:1;
+    unsigned int res_640x350:1;
+    unsigned int res_640x400:1;
+    unsigned int res_640x480:1;
+    unsigned int res_704x480:1;
+    unsigned int res_704x576:1;
+
+    unsigned int res_720x350:1;
+    unsigned int res_720x400:1;
+    unsigned int res_720x480:1;
+    unsigned int res_720x540:1;
+    unsigned int res_720x576:1;
+    unsigned int res_768x576:1;
+    unsigned int res_800x600:1;
+    unsigned int res_832x624:1;
+
+    unsigned int res_920x766:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x1024:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get supported resolutions with square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT            0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+    unsigned int res_640x480:1;
+    unsigned int res_800x600:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x960:1;
+    unsigned int res_1400x1050:1;
+    unsigned int res_1600x1200:1;
+    unsigned int res_1920x1440:1;
+    unsigned int res_2048x1536:1;
+
+    unsigned int res_2560x1920:1;
+    unsigned int res_3200x2400:1;
+    unsigned int res_3840x2880:1;
+    unsigned int pad1:5;
+
+    unsigned int res_848x480:1;
+    unsigned int res_1064x600:1;
+    unsigned int res_1280x720:1;
+    unsigned int res_1360x768:1;
+    unsigned int res_1704x960:1;
+    unsigned int res_1864x1050:1;
+    unsigned int res_1920x1080:1;
+    unsigned int res_2128x1200:1;
+
+    unsigned int res_2560x1400:1;
+    unsigned int res_2728x1536:1;
+    unsigned int res_3408x1920:1;
+    unsigned int res_4264x2400:1;
+    unsigned int res_5120x2880:1;
+    unsigned int pad2:3;
+
+    unsigned int res_768x480:1;
+    unsigned int res_960x600:1;
+    unsigned int res_1152x720:1;
+    unsigned int res_1124x768:1;
+    unsigned int res_1536x960:1;
+    unsigned int res_1680x1050:1;
+    unsigned int res_1728x1080:1;
+    unsigned int res_1920x1200:1;
+
+    unsigned int res_2304x1440:1;
+    unsigned int res_2456x1536:1;
+    unsigned int res_3072x1920:1;
+    unsigned int res_3840x2400:1;
+    unsigned int res_4608x2880:1;
+    unsigned int pad3:3;
+
+    unsigned int res_1280x1024:1;
+    unsigned int pad4:7;
+
+    unsigned int res_1280x768:1;
+    unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power state returns info for encoder and monitor, rely on
+   last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES            0x2a
+/* Get power state returns info for encoder and monitor, rely on last
+   SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE                       0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE               0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE               0x2c
+# define SDVO_ENCODER_STATE_ON                                 (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY                            (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND                            (1 << 2)
+# define SDVO_ENCODER_STATE_OFF                                        (1 << 3)
+# define SDVO_MONITOR_STATE_ON                                 (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY                            (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND                            (1 << 6)
+# define SDVO_MONITOR_STATE_OFF                                        (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING                0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING            0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING            0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+/* Reply/argument layout for SDVO_CMD_{GET,SET}_PANEL_POWER_SEQUENCING.
+ * Each delay tN is a 10-bit value in milliseconds: low byte in tN,
+ * top two bits in tN_high (see the comment above the opcodes). */
+struct psb_sdvo_panel_power_sequencing {
+    u8 t0;
+    u8 t1;
+    u8 t2;
+    u8 t3;
+    u8 t4;
+
+    unsigned int t0_high:2;
+    unsigned int t1_high:2;
+    unsigned int t2_high:2;
+    unsigned int t3_high:2;
+
+    unsigned int t4_high:2;
+    unsigned int pad:6;         /* fills the byte: 2 + 6 bits */
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL               0x30
+/* Reply for SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL (0x30). */
+struct sdvo_max_backlight_reply {
+    u8 max_value;               /* maximum backlight level */
+    u8 default_value;           /* default backlight level */
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL                   0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL                   0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT                     0x33
+/* Reply for SDVO_CMD_GET_AMBIENT_LIGHT (0x33): trip points and the
+ * current sensor value (presumably raw sensor units -- TODO confirm). */
+struct sdvo_get_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT                     0x34
+/* Argument for SDVO_CMD_SET_AMBIENT_LIGHT (0x34): trip points plus an
+ * enable bit; pad fills the final byte (1 + 7 bits). */
+struct sdvo_set_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    unsigned int enable:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE               0x7d
+# define SDVO_DISPLAY_STATE_ON                         (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY                    (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND                    (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF                                (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS            0x84
+/* Reply for SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS (0x84): one capability
+ * bit per picture-enhancement control (two bytes, LSB first). */
+struct psb_intel_sdvo_enhancements_reply {
+    unsigned int flicker_filter:1;
+    unsigned int flicker_filter_adaptive:1;
+    unsigned int flicker_filter_2d:1;
+    unsigned int saturation:1;
+    unsigned int hue:1;
+    unsigned int brightness:1;
+    unsigned int contrast:1;
+    unsigned int overscan_h:1;
+
+    unsigned int overscan_v:1;
+    unsigned int hpos:1;
+    unsigned int vpos:1;
+    unsigned int sharpness:1;
+    unsigned int dot_crawl:1;
+    unsigned int dither:1;
+    unsigned int tv_chroma_filter:1;
+    unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER                        0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE       0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D             0x52
+#define SDVO_CMD_GET_MAX_SATURATION                    0x55
+#define SDVO_CMD_GET_MAX_HUE                           0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS                    0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST                      0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H                    0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V                    0x64
+#define SDVO_CMD_GET_MAX_HPOS                          0x67
+#define SDVO_CMD_GET_MAX_VPOS                          0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS                     0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER              0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER                        0x77
+/* Reply for the per-control SDVO_CMD_GET_MAX_* opcodes above: the
+ * control's maximum and default values. */
+struct psb_intel_sdvo_enhancement_limits_reply {
+    u16 max_value;
+    u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION            0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION            0x80
+# define SDVO_LVDS_COLOR_DEPTH_18                      (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24                      (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG                      (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI                   (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL                      (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL                                (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER                    0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER                    0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE           0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE           0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D                 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D                 0x54
+#define SDVO_CMD_GET_SATURATION                                0x56
+#define SDVO_CMD_SET_SATURATION                                0x57
+#define SDVO_CMD_GET_HUE                               0x59
+#define SDVO_CMD_SET_HUE                               0x5a
+#define SDVO_CMD_GET_BRIGHTNESS                                0x5c
+#define SDVO_CMD_SET_BRIGHTNESS                                0x5d
+#define SDVO_CMD_GET_CONTRAST                          0x5f
+#define SDVO_CMD_SET_CONTRAST                          0x60
+#define SDVO_CMD_GET_OVERSCAN_H                                0x62
+#define SDVO_CMD_SET_OVERSCAN_H                                0x63
+#define SDVO_CMD_GET_OVERSCAN_V                                0x65
+#define SDVO_CMD_SET_OVERSCAN_V                                0x66
+#define SDVO_CMD_GET_HPOS                              0x68
+#define SDVO_CMD_SET_HPOS                              0x69
+#define SDVO_CMD_GET_VPOS                              0x6b
+#define SDVO_CMD_SET_VPOS                              0x6c
+#define SDVO_CMD_GET_SHARPNESS                         0x6e
+#define SDVO_CMD_SET_SHARPNESS                         0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER                  0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER                  0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER                    0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER                    0x79
+/* 16-bit value passed to / returned by the per-control GET_*/SET_*
+ * enhancement opcodes listed above. */
+struct psb_intel_sdvo_enhancements_arg {
+    u16 value;
+}__attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL                         0x70
+#define SDVO_CMD_SET_DOT_CRAWL                         0x71
+# define SDVO_DOT_CRAWL_ON                                     (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON                             (1 << 1)
+
+#define SDVO_CMD_GET_DITHER                            0x72
+#define SDVO_CMD_SET_DITHER                            0x73
+# define SDVO_DITHER_ON                                                (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON                                        (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH                        0x7a
+# define SDVO_CONTROL_BUS_PROM                         (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1                         (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2                         (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3                         (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE       0x9d
+#define SDVO_CMD_GET_ENCODE            0x9e
+#define SDVO_CMD_SET_ENCODE            0x9f
+  #define SDVO_ENCODE_DVI      0x0
+  #define SDVO_ENCODE_HDMI     0x1
+#define SDVO_CMD_SET_PIXEL_REPLI       0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI       0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP   0x8d
+#define SDVO_CMD_SET_COLORIMETRY       0x8e
+  #define SDVO_COLORIMETRY_RGB256   0x0
+  #define SDVO_COLORIMETRY_RGB220   0x1
+  #define SDVO_COLORIMETRY_YCrCb422 0x3
+  #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY       0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT                0x91
+#define SDVO_CMD_GET_AUDIO_STAT                0x92
+#define SDVO_CMD_SET_HBUF_INDEX                0x93
+#define SDVO_CMD_GET_HBUF_INDEX                0x94
+#define SDVO_CMD_GET_HBUF_INFO         0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT     0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT     0x97
+#define SDVO_CMD_SET_HBUF_DATA         0x98
+#define SDVO_CMD_GET_HBUF_DATA         0x99
+#define SDVO_CMD_SET_HBUF_TXRATE       0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE       0x9b
+  #define SDVO_HBUF_TX_DISABLED        (0 << 6)
+  #define SDVO_HBUF_TX_ONCE    (2 << 6)
+  #define SDVO_HBUF_TX_VSYNC   (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO     0x9c
+#define SDVO_NEED_TO_STALL  (1 << 7)
+
+/* Reply for SDVO_CMD_GET_SUPP_ENCODE (0x9d): supported DVI and HDMI
+ * encode revisions. */
+struct psb_intel_sdvo_encode {
+    u8 dvi_rev;
+    u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644 (file)
index 0000000..7be802b
--- /dev/null
@@ -0,0 +1,564 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+
+/*
+ * inline functions
+ */
+
+/* Map a pipe index (0..2) to its PIPExSTAT register offset. */
+static inline u32
+psb_pipestat(int pipe)
+{
+	switch (pipe) {
+	case 0:
+		return PIPEASTAT;
+	case 1:
+		return PIPEBSTAT;
+	case 2:
+		return PIPECSTAT;
+	}
+	BUG();
+}
+
+/* Map a pipe index (0..2) to its VDC event flag. */
+static inline u32
+mid_pipe_event(int pipe)
+{
+	switch (pipe) {
+	case 0:
+		return _PSB_PIPEA_EVENT_FLAG;
+	case 1:
+		return _MDFLD_PIPEB_EVENT_FLAG;
+	case 2:
+		return _MDFLD_PIPEC_EVENT_FLAG;
+	}
+	BUG();
+}
+
+/* Map a pipe index (0..2) to its vsync/vblank interrupt flag. */
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+	switch (pipe) {
+	case 0:
+		return _PSB_VSYNC_PIPEA_FLAG;
+	case 1:
+		return _PSB_VSYNC_PIPEB_FLAG;
+	case 2:
+		return _MDFLD_PIPEC_VBLANK_FLAG;
+	}
+	BUG();
+}
+
+/* Map a pipe index (0..2) to its PIPExCONF register offset. */
+static inline u32
+mid_pipeconf(int pipe)
+{
+	switch (pipe) {
+	case 0:
+		return PIPEACONF;
+	case 1:
+		return PIPEBCONF;
+	case 2:
+		return PIPECCONF;
+	}
+	BUG();
+}
+
+/*
+ * Enable the pipestat interrupt bits in @mask for @pipe, updating the
+ * software shadow (dev_priv->pipestat) and, if the display island can
+ * be powered up, the hardware register.  No-op if already enabled.
+ */
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != mask) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] |= mask;
+		/* Enable the interrupt, clear any pending status */
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 writeVal = PSB_RVDC32(reg);
+			/* mask >> 16 sets the matching status bits too,
+			 * clearing any stale pending event */
+			writeVal |= (mask | (mask >> 16));
+			PSB_WVDC32(writeVal, reg);
+			/* posting read to flush the write */
+			(void) PSB_RVDC32(reg);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+/*
+ * Disable the pipestat interrupt bits in @mask for @pipe, mirroring
+ * psb_enable_pipestat(): update the shadow, then the hardware register
+ * if the display island can be powered up.  No-op if already disabled.
+ */
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != 0) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] &= ~mask;
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 writeVal = PSB_RVDC32(reg);
+			writeVal &= ~mask;
+			PSB_WVDC32(writeVal, reg);
+			/* posting read to flush the write */
+			(void) PSB_RVDC32(reg);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+/*
+ * Add @pipe's event flag to the VDC interrupt mask/enable registers.
+ * Silently does nothing if the display island cannot be powered up.
+ */
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (gma_power_begin(dev_priv->dev, false)) {
+		u32 pipe_event = mid_pipe_event(pipe);
+		dev_priv->vdc_irq_mask |= pipe_event;
+		/* PSB_INT_MASK_R is active-low, hence the complement */
+		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+		gma_power_end(dev_priv->dev);
+	}
+}
+
+/*
+ * Remove @pipe's event flag from the VDC interrupt mask/enable
+ * registers, but only once no pipestat interrupts remain enabled for
+ * that pipe (the shadow dev_priv->pipestat[pipe] is zero).
+ */
+void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (dev_priv->pipestat[pipe] == 0) {
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 pipe_event = mid_pipe_event(pipe);
+			dev_priv->vdc_irq_mask &= ~pipe_event;
+			/* PSB_INT_MASK_R is active-low */
+			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+/*
+ * Second-level handler for a display pipe event: read the pipe's
+ * status register, clear the latched event bits, and forward
+ * vblank/TE events to the DRM core.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	uint32_t pipe_stat_val = 0;
+	uint32_t pipe_stat_reg = psb_pipestat(pipe);
+	uint32_t pipe_enable = dev_priv->pipestat[pipe];
+	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+	uint32_t pipe_clear;
+	uint32_t i = 0;
+
+	spin_lock(&dev_priv->irqmask_lock);
+
+	/* Keep only events that are both enabled and actually flagged */
+	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+	pipe_stat_val &= pipe_enable | pipe_status;
+	pipe_stat_val &= pipe_stat_val >> 16;
+
+	spin_unlock(&dev_priv->irqmask_lock);
+
+	/* Clear the 2nd level interrupt status bits
+	 * Sometimes the bits are very sticky so we repeat until they unstick */
+	for (i = 0; i < 0xffff; i++) {
+		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+		if (pipe_clear == 0)
+			break;
+	}
+
+	if (pipe_clear)
+		dev_err(dev->dev,
+		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+	/* Both vblank and TE (tear-effect) events feed the DRM vblank
+	 * machinery */
+	if (pipe_stat_val & PIPE_VBLANK_STATUS)
+		drm_handle_vblank(dev, pipe);
+
+	if (pipe_stat_val & PIPE_TE_STATUS)
+		drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler: dispatch pipe A/B vsync
+ * events to the per-pipe handler.  (Medfield pipe C is not wired up
+ * here yet -- see the FIXME in psb_irq_handler.)
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+		mid_pipe_event_handler(dev, 0);
+
+	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+		mid_pipe_event_handler(dev, 1);
+}
+
+/*
+ * Top-level interrupt handler.  Snapshot the VDC interrupt identity
+ * register under irqmask_lock, service display events (only while the
+ * display island is powered), silence unexpected SGX interrupts, then
+ * acknowledge the bits we observed.
+ */
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+	int handled = 0;
+
+	spin_lock(&dev_priv->irqmask_lock);
+
+	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+	if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+		dsp_int = 1;
+
+	/* FIXME: Handle Medfield
+	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+		dsp_int = 1;
+	*/
+
+	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+		sgx_int = 1;
+
+	/* Only dispatch sources we have enabled */
+	vdc_stat &= dev_priv->vdc_irq_mask;
+	spin_unlock(&dev_priv->irqmask_lock);
+
+	if (dsp_int && gma_power_is_on(dev)) {
+		psb_vdc_interrupt(dev, vdc_stat);
+		handled = 1;
+	}
+
+	if (sgx_int) {
+		/* Not expected - we have it masked, shut it up */
+		u32 s, s2;
+		s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+		s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+		PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+		PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+		/* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
+		   we may as well poll even if we add that ! */
+		handled = 1;
+	}
+
+	/* Ack what we saw; posting read + barrier order the ack before
+	 * the handler returns */
+	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	if (!handled)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * DRM irq_preinstall hook: prime the VDC interrupt mask from the
+ * current vblank-enable state before the handler is installed.
+ */
+void psb_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (gma_power_is_on(dev))
+		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+	if (dev->vblank_enabled[0])
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	if (dev->vblank_enabled[1])
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+	/* FIXME: Handle Medfield irq mask
+	if (dev->vblank_enabled[1])
+		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+	if (dev->vblank_enabled[2])
+		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+	*/
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/*
+ * DRM irq_postinstall hook: enable the masked interrupt sources and
+ * sync per-pipe vblank pipestat bits with the vblank-enable state.
+ * Always returns 0.
+ */
+int psb_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+	if (dev->vblank_enabled[0])
+		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[1])
+		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[2])
+		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+/*
+ * DRM irq_uninstall hook: disable all per-pipe vblank interrupts and
+ * shrink the VDC irq mask down to the non-display sources
+ * (SGX/MSVDX/TOPAZ), then ack anything still pending.
+ */
+void psb_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+	if (dev->vblank_enabled[0])
+		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[1])
+		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[2])
+		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	/* Keep only the non-display interrupt sources enabled */
+	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+				  _PSB_IRQ_MSVDX_FLAG |
+				  _LNC_IRQ_TOPAZ_FLAG;
+
+	/* These two registers are safe even if display island is off */
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	wmb();
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/*
+ * Enable the DPST histogram and PWM phase-in interrupt logic on pipe
+ * A.  No-op if the display island cannot be powered up.  The reads
+ * into hist_reg/pwm_reg after each write only post the write; their
+ * values are intentionally discarded.
+ */
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (gma_power_begin(dev, false)) {
+		/* 1u << 31: shifting a *signed* 1 into the sign bit is
+		 * undefined behaviour in C, so use an unsigned constant */
+		PSB_WVDC32(1u << 31, HISTOGRAM_LOGIC_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		PSB_WVDC32(1u << 31, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+						| PWM_PHASEIN_INT_ENABLE,
+							   PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+							HISTOGRAM_INT_CONTROL);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+							PWM_CONTROL_LOGIC);
+
+		gma_power_end(dev);
+	}
+}
+
+/*
+ * Enable DPST interrupts: unmask the pipe A event and turn on the
+ * histogram/PWM logic, all under irqmask_lock.  Always returns 0.
+ */
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* enable DPST */
+	mid_enable_pipe_event(dev_priv, 0);
+	psb_irq_turn_on_dpst(dev);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+/*
+ * Disable the DPST histogram interrupt logic on pipe A and clear the
+ * PWM phase-in interrupt enable bit.  No-op if the display island
+ * cannot be powered up.
+ */
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (gma_power_begin(dev, false)) {
+		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		/* Bitwise ~, not logical !: the old code computed
+		 * pwm_reg & !(PWM_PHASEIN_INT_ENABLE) == pwm_reg & 0,
+		 * wiping the whole PWM control word instead of clearing
+		 * just the phase-in interrupt enable bit. */
+		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+							PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		gma_power_end(dev);
+	}
+}
+
+/*
+ * Disable DPST interrupts: mask the pipe A event (if no other pipestat
+ * bits remain) and turn off the histogram/PWM logic, under
+ * irqmask_lock.  Always returns 0.
+ */
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, 0);
+	psb_irq_turn_off_dpst(dev);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+#ifdef PSB_FIXME
+/* Compiled out: waits (up to 3s) for @counter to pass *@sequence and
+ * stores the value reached.  PSB_FIXME is presumably never defined --
+ * NOTE(review): confirm before removing. */
+static int psb_vblank_do_wait(struct drm_device *dev,
+			      unsigned int *sequence, atomic_t *counter)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+		      - *sequence) <= (1 << 23)));
+	*sequence = cur_vblank;
+
+	return ret;
+}
+#endif
+
+/*
+ * DRM enable_vblank hook: enable the vblank interrupt for @pipe.
+ * Returns -EINVAL if the pipe is not running (or if the display island
+ * could not be powered up to check, since reg_val then stays 0).
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	if (gma_power_begin(dev, false)) {
+		reg_val = REG_READ(pipeconf_reg);
+		gma_power_end(dev);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* NOTE(review): pipe 2 gets no vdc_irq_mask flag here -- only
+	 * pipes 0/1 are handled; confirm whether pipe C needs one */
+	if (pipe == 0)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	else if (pipe == 1)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * DRM disable_vblank hook: disable the vblank interrupt for @pipe
+ * (pipes 0/1 only clear their vdc_irq_mask flag; the pipestat bit is
+ * cleared for any pipe).
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (pipe == 0)
+		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+	else if (pipe == 1)
+		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index.  Returns the hardware frame counter for the
+ * pipe, or 0 if the pipe is invalid/disabled or the display island
+ * could not be powered up.
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	uint32_t high_frame = PIPEAFRAMEHIGH;
+	uint32_t low_frame = PIPEAFRAMEPIXEL;
+	uint32_t pipeconf_reg = PIPEACONF;
+	uint32_t reg_val = 0;
+	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		high_frame = PIPEBFRAMEHIGH;
+		low_frame = PIPEBFRAMEPIXEL;
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		high_frame = PIPECFRAMEHIGH;
+		low_frame = PIPECFRAMEPIXEL;
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+		return 0;
+	}
+
+	if (!gma_power_begin(dev, false))
+		return 0;
+
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+								pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+			PIPE_FRAME_LOW_SHIFT);
+		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	/* low holds 8 frame-count bits; high supplies the rest */
+	count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+	gma_power_end(dev);
+
+	return count;
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644 (file)
index 0000000..216fda3
--- /dev/null
@@ -0,0 +1,45 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+/* DRM irq hooks (install/remove handler state and the top-level ISR) */
+void psb_irq_preinstall(struct drm_device *dev);
+int  psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+/* DPST (display power saving) interrupt control */
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+/* Vblank support used by the DRM core */
+int  psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+#endif /* _SYSIRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644 (file)
index 0000000..b867aab
--- /dev/null
@@ -0,0 +1,88 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+/*
+ * Periodic lid poll: read the opregion lid state word and, when it
+ * changes, power the panel and restore backlight on open, or kill the
+ * backlight and panel power on close.  Re-arms itself every
+ * PSB_LID_DELAY jiffies.
+ */
+static void psb_lid_timer_func(unsigned long data)
+{
+	struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+	u32 *lid_state = dev_priv->lid_state;
+	u32 pp_status;
+
+	if (readl(lid_state) == dev_priv->lid_last_state)
+		goto lid_timer_schedule;
+
+	if ((readl(lid_state)) & 0x01) {
+		/* Lid open: power the panel on and wait for PP_ON */
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0);
+
+		/*FIXME: should be backlight level before*/
+		psb_intel_lvds_set_brightness(dev, 100);
+	} else {
+		psb_intel_lvds_set_brightness(dev, 0);
+
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+		/* Wait for PP_ON to *clear*.  The old condition,
+		 * "(pp_status & PP_ON) == 0", spun until the panel
+		 * reported ON right after we turned it OFF. */
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+	}
+	dev_priv->lid_last_state =  readl(lid_state);
+
+lid_timer_schedule:
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+	if (!timer_pending(lid_timer)) {
+		lid_timer->expires = jiffies + PSB_LID_DELAY;
+		add_timer(lid_timer);
+	}
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+/*
+ * Set up and immediately arm the lid-poll timer; lid_lock guards the
+ * arming done here and in psb_lid_timer_func().
+ */
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+
+	spin_lock_init(&dev_priv->lid_lock);
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+	init_timer(lid_timer);
+
+	lid_timer->data = (unsigned long)dev_priv;
+	lid_timer->function = psb_lid_timer_func;
+	lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+	add_timer(lid_timer);
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+/* Stop the lid-poll timer, waiting for a running callback to finish. */
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+	del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644 (file)
index 0000000..b81c7c1
--- /dev/null
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL              0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG         (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT       (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK                (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT       (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK                (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT                (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK         (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT       (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK                (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT       (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK                (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT                (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK         (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED         (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED                (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO            (2)
+
+#define PSB_CR_CORE_ID                 0x0010
+#define _PSB_CC_ID_ID_SHIFT                    (16)
+#define _PSB_CC_ID_ID_MASK                     (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT                        (0)
+#define _PSB_CC_ID_CONFIG_MASK                 (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION           0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT                (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK         (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT           (16)
+#define _PSB_CC_REVISION_MAJOR_MASK            (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT           (8)
+#define _PSB_CC_REVISION_MINOR_MASK            (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT     (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK      (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1     0x0018
+
+#define PSB_CR_SOFT_RESET              0x0080
+#define _PSB_CS_RESET_TSP_RESET                (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET                (1 << 5)
+#define _PSB_CS_RESET_USE_RESET                (1 << 4)
+#define _PSB_CS_RESET_TA_RESET         (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET                (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET       (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET                        (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2     0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2      0x0110
+
+#define PSB_CR_EVENT_STATUS2           0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2       0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT           (1 << 4)
+
+#define PSB_CR_EVENT_STATUS            0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE       0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR                0x0134
+#define _PSB_CE_MASTER_INTERRUPT               (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT                   (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE                  (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS          (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE                        (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER             (1 << 18)
+#define _PSB_CE_SW_EVENT                       (1 << 14)
+#define _PSB_CE_TA_FINISHED                    (1 << 13)
+#define _PSB_CE_TA_TERMINATE                   (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH         (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL          (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT           (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE                        (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK            0x0007FFFF
+#define PSB_USE_OFFSET_SIZE            (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0          0x0A0C
+#define PSB_CR_USE_CODE_BASE1          0x0A10
+#define PSB_CR_USE_CODE_BASE2          0x0A14
+#define PSB_CR_USE_CODE_BASE3          0x0A18
+#define PSB_CR_USE_CODE_BASE4          0x0A1C
+#define PSB_CR_USE_CODE_BASE5          0x0A20
+#define PSB_CR_USE_CODE_BASE6          0x0A24
+#define PSB_CR_USE_CODE_BASE7          0x0A28
+#define PSB_CR_USE_CODE_BASE8          0x0A2C
+#define PSB_CR_USE_CODE_BASE9          0x0A30
+#define PSB_CR_USE_CODE_BASE10         0x0A34
+#define PSB_CR_USE_CODE_BASE11         0x0A38
+#define PSB_CR_USE_CODE_BASE12         0x0A3C
+#define PSB_CR_USE_CODE_BASE13         0x0A40
+#define PSB_CR_USE_CODE_BASE14         0x0A44
+#define PSB_CR_USE_CODE_BASE15         0x0A48
+#define PSB_CR_USE_CODE_BASE(_i)       (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT                 (25)
+#define _PSB_CUC_BASE_DM_MASK                  (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT               (0)     /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT          (7)
+#define _PSB_CUC_BASE_ADDR_MASK                        (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX                     (0)
+#define _PSB_CUC_DM_PIXEL                      (1)
+#define _PSB_CUC_DM_RESERVED                   (2)
+#define _PSB_CUC_DM_EDM                                (3)
+
+#define PSB_CR_PDS_EXEC_BASE           0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT       (20)    /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT  (20)
+
+#define PSB_CR_EVENT_KICKER            0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT           (4)     /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK              0x0AC8
+#define _PSB_CE_KICK_NOW                       (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1      0x0C38
+
+#define PSB_CR_BIF_CTRL                        0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT               (1 << 4)
+#define _PSB_CB_CTRL_INVALDC                   (1 << 3)
+#define _PSB_CB_CTRL_FLUSH                     (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT            0x0C04
+
+#define PSB_CR_BIF_FAULT               0x0C08
+#define _PSB_CBI_STAT_PF_N_RW                  (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT              (0)
+#define _PSB_CBI_STAT_FAULT_MASK               (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE              (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA                 (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM                        (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D                 (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE                        (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP                        (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP                        (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS            (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST               (1 << 9)
+
+#define PSB_CR_BIF_BANK0               0x0C78
+#define PSB_CR_BIF_BANK1               0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0      0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE       0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE         0x0CAC
+
+#define PSB_CR_2D_SOCIF                        0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT          (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK           (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY                    (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS          0x0E04
+#define _PSB_C2B_STATUS_BUSY                   (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT         (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK          (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define        PSB_2D_CLIP_BH                  (0x00000000)
+#define        PSB_2D_PAT_BH                   (0x10000000)
+#define        PSB_2D_CTRL_BH                  (0x20000000)
+#define        PSB_2D_SRC_OFF_BH               (0x30000000)
+#define        PSB_2D_MASK_OFF_BH              (0x40000000)
+#define        PSB_2D_RESERVED1_BH             (0x50000000)
+#define        PSB_2D_RESERVED2_BH             (0x60000000)
+#define        PSB_2D_FENCE_BH                 (0x70000000)
+#define        PSB_2D_BLIT_BH                  (0x80000000)
+#define        PSB_2D_SRC_SURF_BH              (0x90000000)
+#define        PSB_2D_DST_SURF_BH              (0xA0000000)
+#define        PSB_2D_PAT_SURF_BH              (0xB0000000)
+#define        PSB_2D_SRC_PAL_BH               (0xC0000000)
+#define        PSB_2D_PAT_PAL_BH               (0xD0000000)
+#define        PSB_2D_MASK_SURF_BH             (0xE0000000)
+#define        PSB_2D_FLUSH_BH                 (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX           (1)
+#define PSB_2D_CLIPCOUNT_MASK          (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK       (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT         (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK          (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK       (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT         (12)
+#define PSB_2D_CLIP_XMIN_MASK          (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK       (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT         (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK          (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK       (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT         (12)
+#define PSB_2D_CLIP_YMIN_MASK          (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK       (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT         (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK         (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT                (0)
+#define PSB_2D_PAT_WIDTH_MASK          (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT         (5)
+#define PSB_2D_PAT_YSTART_MASK         (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT                (10)
+#define PSB_2D_PAT_XSTART_MASK         (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT                (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL              (0x00000001)
+#define PSB_2D_DSTCK_CTRL              (0x00000002)
+#define PSB_2D_ALPHA_CTRL              (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK             (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK          (0x00000000)
+#define PSB_2D_CK_COL_SHIFT            (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK            (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK         (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT           (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK           (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK                (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT          (12)
+#define PSB_2D_SRCALPHA_OP_MASK                (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK     (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT       (20)
+#define PSB_2D_SRCALPHA_OP_ONE         (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC         (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST         (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG          (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG          (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL         (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO                (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT         (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR     (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK                (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK     (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT       (24)
+#define PSB_2D_DSTALPHA_OP_ONE         (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC         (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST         (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG          (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG          (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL         (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO                (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT         (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR     (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE       (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK      (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE                (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK       (0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK      ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT     (12)
+#define PSB_2D_SRCOFF_YSTART_MASK      (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT     (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK     ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT    (12)
+#define PSB_2D_MASKOFF_YSTART_MASK     (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT    (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK                        (3 << 25)
+#define PSB_2D_ROT_CLRMASK             (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE                        (0 << 25)
+#define PSB_2D_ROT_90DEGS              (1 << 25)
+#define PSB_2D_ROT_180DEGS             (2 << 25)
+#define PSB_2D_ROT_270DEGS             (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK          (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK       (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR         (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL         (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL         (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR         (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK           (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE           (0x00000000)
+#define PSB_2D_DSTCK_PASS              (0x00200000)
+#define PSB_2D_DSTCK_REJECT            (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK           (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE           (0x00000000)
+#define PSB_2D_SRCCK_PASS              (0x00080000)
+#define PSB_2D_SRCCK_REJECT            (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE             (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE            (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK             (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK                        (0x00010000)
+#define PSB_2D_USE_PAT                 (0x00010000)
+#define PSB_2D_USE_FILL                        (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK              (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK           (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT             (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK              (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK           (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT             (0)
+
+#define PSB_2D_ROP4_MASK               (0x0000FFFF)
+/*
+ *     DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ *     Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK         (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT                (0)
+/*
+ *     DWORD1: (Always Present)
+ *     X Start (Dest)
+ *     Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK         (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK      (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT                (12)
+#define PSB_2D_DST_YSTART_MASK         (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK      (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT                (0)
+/*
+ *     DWORD2: (Always Present)
+ *     X Size (Dest)
+ *     Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK          (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK       (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT         (12)
+#define PSB_2D_DST_YSIZE_MASK          (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK       (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT         (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK         (0x00078000)
+#define PSB_2D_SRC_1_PAL               (0x00000000)
+#define PSB_2D_SRC_2_PAL               (0x00008000)
+#define PSB_2D_SRC_4_PAL               (0x00010000)
+#define PSB_2D_SRC_8_PAL               (0x00018000)
+#define PSB_2D_SRC_8_ALPHA             (0x00020000)
+#define PSB_2D_SRC_4_ALPHA             (0x00028000)
+#define PSB_2D_SRC_332RGB              (0x00030000)
+#define PSB_2D_SRC_4444ARGB            (0x00038000)
+#define PSB_2D_SRC_555RGB              (0x00040000)
+#define PSB_2D_SRC_1555ARGB            (0x00048000)
+#define PSB_2D_SRC_565RGB              (0x00050000)
+#define PSB_2D_SRC_0888ARGB            (0x00058000)
+#define PSB_2D_SRC_8888ARGB            (0x00060000)
+#define PSB_2D_SRC_8888UYVY            (0x00068000)
+#define PSB_2D_SRC_RESERVED            (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP     (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT                (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT          (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ *  WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK         (0x00078000)
+#define PSB_2D_PAT_1_PAL               (0x00000000)
+#define PSB_2D_PAT_2_PAL               (0x00008000)
+#define PSB_2D_PAT_4_PAL               (0x00010000)
+#define PSB_2D_PAT_8_PAL               (0x00018000)
+#define PSB_2D_PAT_8_ALPHA             (0x00020000)
+#define PSB_2D_PAT_4_ALPHA             (0x00028000)
+#define PSB_2D_PAT_332RGB              (0x00030000)
+#define PSB_2D_PAT_4444ARGB            (0x00038000)
+#define PSB_2D_PAT_555RGB              (0x00040000)
+#define PSB_2D_PAT_1555ARGB            (0x00048000)
+#define PSB_2D_PAT_565RGB              (0x00050000)
+#define PSB_2D_PAT_0888ARGB            (0x00058000)
+#define PSB_2D_PAT_8888ARGB            (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT                (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT          (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK         (0x00078000)
+#define PSB_2D_DST_332RGB              (0x00030000)
+#define PSB_2D_DST_4444ARGB            (0x00038000)
+#define PSB_2D_DST_555RGB              (0x00040000)
+#define PSB_2D_DST_1555ARGB            (0x00048000)
+#define PSB_2D_DST_565RGB              (0x00050000)
+#define PSB_2D_DST_0888ARGB            (0x00058000)
+#define PSB_2D_DST_8888ARGB            (0x00060000)
+#define PSB_2D_DST_8888AYUV            (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT                (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT          (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK                (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK     (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT       (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK          (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK       (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT         (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT    (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT       (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK     (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK                (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN                (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT       (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK     (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK                (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN                (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY            (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY            (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS          (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS          (0x0000)
+#define PSB_2D_ROP3_SRC                        (0xCC)
+#define PSB_2D_ROP3_PAT                        (0xF0)
+#define PSB_2D_ROP3_DST                        (0xAA)
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE       16
+#define PSB_TA_MEM_HW_COOKIE_SIZE      16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES              2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK               0
+#define PSB_RASTER                     1
+#define PSB_RETURN                     2
+#define PSB_TA                         3
+
+/* Power management */
+#define PSB_PUNIT_PORT                 0x04
+#define PSB_OSPMBA                     0x78
+#define PSB_APMBA                      0x7a
+#define PSB_APM_CMD                    0x0
+#define PSB_APM_STS                    0x04
+#define PSB_PWRGT_VID_ENC_MASK         0x30
+#define PSB_PWRGT_VID_DEC_MASK         0xc
+#define PSB_PWRGT_GL3_MASK             0xc0
+
+#define PSB_PM_SSC                     0x20
+#define PSB_PM_SSS                     0x30
+#define PSB_PWRGT_DISPLAY_MASK         0xc /*on a different BA than video/gfx*/
+#define MDFLD_PWRGT_DISPLAY_A_CNTR     0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR     0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR     0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR     0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK             0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS      0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS      0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS      0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0          0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0   0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0   0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0   0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS      0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
index 8f371e8d630ffeeced2b295400d2a7a4f3233b60..f7c17b23983389ae7b978224180dadcd4b6616c3 100644 (file)
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
                                            dev_priv->hw_status_page,
                                            dev_priv->dma_status_page);
-                       /* Need to rewrite hardware status page */
-                       I810_WRITE(0x02080, 0x1ffff000);
                }
                kfree(dev->dev_private);
                dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
 }
 
 /* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
                                 struct drm_file *file_priv)
 {
        struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
                if (dev_priv->page_flipping)
                        i810_do_cleanup_pageflip(dev);
        }
-}
 
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                       struct drm_file *file_priv)
-{
-       i810_reclaim_buffers(dev, file_priv);
+       if (file_priv->master && file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               i810_driver_reclaim_buffers(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       } else {
+               /* master disappeared, clean up stuff anyway and hope nothing
+                * goes wrong */
+               i810_driver_reclaim_buffers(dev, file_priv);
+       }
+
 }
 
 int i810_driver_dma_quiescent(struct drm_device *dev)
index d4266bdf6fb4eb3d3cb6ac6ea991b0de8a8cd7c9..053f1ee58393a885e6aff3d1b8e9c5de8f7c5cf5 100644 (file)
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
        i810_PCI_IDS
 };
 
+static const struct file_operations i810_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -52,20 +63,9 @@ static struct drm_driver driver = {
        .lastclose = i810_driver_lastclose,
        .preclose = i810_driver_preclose,
        .device_is_agp = i810_driver_device_is_agp,
-       .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
        .ioctls = i810_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &i810_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index c9339f48179551dacd84b03bd74bd327d4750d80..6e0acad9e0f556549621e7945a00d7cafd0abbac 100644 (file)
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
 
                                /* i810_dma.c */
 extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                              struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+                                struct drm_file *file_priv);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
 extern void i810_driver_lastclose(struct drm_device *dev);
 extern void i810_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                              struct drm_file *file_priv);
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
index 0ae6a7c5020f55272b97e7c380a5c2c847a60e9b..808b255d7fc68a2b967470f5656a1bbbc0b7768c 100644 (file)
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_dvo.o \
          intel_ringbuffer.o \
          intel_overlay.o \
+         intel_sprite.o \
          intel_opregion.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
index 004b048c5192979ce0e180d14824220eb271c9ab..11807989f918b351d0585e5e4a54b9d87c0a3125 100644 (file)
@@ -1001,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
        return 0;
 }
 
-static int i915_drpc_info(struct seq_file *m, void *unused)
+static int ironlake_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -1068,6 +1068,90 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
        return 0;
 }
 
+static int gen6_drpc_info(struct seq_file *m)
+{
+
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rpmodectl1, gt_core_status, rcctl1;
+       int count=0, ret;
+
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->forcewake_count)) {
+               seq_printf(m, "RC information inaccurate because userspace "
+                             "holds a reference \n");
+       } else {
+               /* NB: we cannot use forcewake, else we read the wrong values */
+               while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+                       udelay(10);
+               seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+       }
+
+       gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+       trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+       rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+       rcctl1 = I915_READ(GEN6_RC_CONTROL);
+       mutex_unlock(&dev->struct_mutex);
+
+       seq_printf(m, "Video Turbo Mode: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+       seq_printf(m, "HW control enabled: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
+       seq_printf(m, "SW control enabled: %s\n",
+                  yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+                         GEN6_RP_MEDIA_SW_MODE));
+       seq_printf(m, "RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+       seq_printf(m, "RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+       seq_printf(m, "Deep RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+       seq_printf(m, "Deepest RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+       seq_printf(m, "Current RC state: ");
+       switch (gt_core_status & GEN6_RCn_MASK) {
+       case GEN6_RC0:
+               if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+                       seq_printf(m, "Core Power Down\n");
+               else
+                       seq_printf(m, "on\n");
+               break;
+       case GEN6_RC3:
+               seq_printf(m, "RC3\n");
+               break;
+       case GEN6_RC6:
+               seq_printf(m, "RC6\n");
+               break;
+       case GEN6_RC7:
+               seq_printf(m, "RC7\n");
+               break;
+       default:
+               seq_printf(m, "Unknown\n");
+               break;
+       }
+
+       seq_printf(m, "Core Power Down: %s\n",
+                  yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+       return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       if (IS_GEN6(dev) || IS_GEN7(dev))
+               return gen6_drpc_info(m);
+       else
+               return ironlake_drpc_info(m);
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
index a9ae374861e788fe7ced3b6cf8f1f129c6e84c86..5f4d5893e98356ff8b657a3a3048858d8ba15fab 100644 (file)
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+               value = 1;
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -2305,6 +2308,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index a1103fc6597dde91958658d4d6faaabaec38e803..8f7187915b0dea430f864baef322ac509c8106be 100644 (file)
@@ -810,6 +810,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static const struct file_operations i915_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = i915_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
@@ -843,21 +858,7 @@ static struct drm_driver driver = {
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = i915_gem_dumb_destroy,
        .ioctls = i915_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_gem_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = i915_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
-
+       .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
@@ -922,13 +923,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
 
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       (((dev_priv)->info->gen >= 6) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE) &&         \
-        ((reg) != ECOBUS))
-
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
index 554bef7a3b9c8db39c5963fcc72ba1d411f31125..602bc80baabb982814dcce3640e13b53a638f09e 100644 (file)
@@ -207,6 +207,8 @@ struct drm_i915_display_funcs {
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        void (*update_wm)(struct drm_device *dev);
+       void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+                                uint32_t sprite_width, int pixel_size);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode,
@@ -337,6 +339,8 @@ typedef struct drm_i915_private {
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
+       uint32_t last_acthd_bsd;
+       uint32_t last_acthd_blt;
        uint32_t last_instdone;
        uint32_t last_instdone1;
 
@@ -350,6 +354,7 @@ typedef struct drm_i915_private {
 
        /* overlay */
        struct intel_overlay *overlay;
+       bool sprite_scaling_enabled;
 
        /* LVDS info */
        int backlight_level;  /* restore backlight to this value */
@@ -1362,8 +1367,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
         ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE) &&         \
-        ((reg) != ECOBUS))
+        ((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
index 8359dc777041be9265e53907025564196b1d0fbc..e55badb2d86ddc5d7f245b8e81343a358f5dc43c 100644 (file)
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
                                           || atomic_read(&dev_priv->mm.wedged));
 
                        ring->irq_put(ring);
-               } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
-                                                     seqno) ||
-                                   atomic_read(&dev_priv->mm.wedged), 3000))
+               } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+                                                            seqno) ||
+                                          atomic_read(&dev_priv->mm.wedged), 3000))
                        ret = -EBUSY;
                ring->waiting_seqno = 0;
 
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
                        if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                                ret = -EIO;
+               } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+                                                            seqno) ||
+                                   atomic_read(&dev_priv->mm.wedged), 3000)) {
+                       ret = -EBUSY;
                }
        }
 
index b9da8900ae4eaef10c12f2d68da79a226f9f3260..65e1f0043f9df5564d63eb8e95d413f7b5f01513 100644 (file)
@@ -970,6 +970,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        }
 }
 
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret, i;
+
+       if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+               return 0;
+
+       ret = intel_ring_begin(ring, 4 * 3);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 4; i++) {
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+               intel_ring_emit(ring, 0);
+       }
+
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
@@ -984,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
+       u32 mask;
        int ret, mode, i;
 
        if (!i915_gem_check_execbuffer(args)) {
@@ -1021,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+       mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1034,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;
 
-                       ret = intel_ring_begin(ring, 4);
-                       if (ret)
-                               return ret;
-
-                       intel_ring_emit(ring, MI_NOOP);
-                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-                       intel_ring_emit(ring, INSTPM);
-                       intel_ring_emit(ring,
-                                       I915_EXEC_CONSTANTS_MASK << 16 | mode);
-                       intel_ring_advance(ring);
-
-                       dev_priv->relative_constants_mode = mode;
+                       /* The HW changed the meaning on this bit on gen6 */
+                       if (INTEL_INFO(dev)->gen >= 6)
+                               mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
@@ -1176,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
+       if (ring == &dev_priv->ring[RCS] &&
+           mode != dev_priv->relative_constants_mode) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                               goto err;
+
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, INSTPM);
+               intel_ring_emit(ring, mask << 16 | mode);
+               intel_ring_advance(ring);
+
+               dev_priv->relative_constants_mode = mode;
+       }
+
+       if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+               ret = i915_reset_gen7_sol_offsets(dev, ring);
+               if (ret)
+                       goto err;
+       }
+
        trace_i915_gem_ring_dispatch(ring, seqno);
 
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
index b40004b559771dc0244814335d44fb1537625087..5d433fc11ace138748907b17cbb3a9932732b46b 100644 (file)
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
                stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
-                                                       crtc->y * crtc->fb->pitch +
+                                                       crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }
 
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
-       if (IS_GEN6(dev) &&
-           (tmp & RING_WAIT_SEMAPHORE)) {
-               DRM_ERROR("Kicking stuck semaphore on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
        return false;
 }
 
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd, instdone, instdone1;
+       uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
        bool err = false;
 
        if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
        }
 
        if (INTEL_INFO(dev)->gen < 4) {
-               acthd = I915_READ(ACTHD);
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
        } else {
-               acthd = I915_READ(ACTHD_I965);
                instdone = I915_READ(INSTDONE_I965);
                instdone1 = I915_READ(INSTDONE1);
        }
+       acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+       acthd_bsd = HAS_BSD(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+       acthd_blt = HAS_BLT(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
 
        if (dev_priv->last_acthd == acthd &&
+           dev_priv->last_acthd_bsd == acthd_bsd &&
+           dev_priv->last_acthd_blt == acthd_blt &&
            dev_priv->last_instdone == instdone &&
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
                dev_priv->hangcheck_count = 0;
 
                dev_priv->last_acthd = acthd;
+               dev_priv->last_acthd_bsd = acthd_bsd;
+               dev_priv->last_acthd_blt = acthd_blt;
                dev_priv->last_instdone = instdone;
                dev_priv->last_instdone1 = instdone1;
        }
index a26d5b0a36908c3d7ddd5c9c7407921fd2826db9..c3afb783cb9d93bf7c79dd32a18d8627ed6bfd0d 100644 (file)
 #define   INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
+#define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
 #define ACTHD          0x020c8
 #define FW_BLC         0x020d8
 #define FW_BLC2                0x020dc
 #define   PIPECONF_PROGRESSIVE (0 << 21)
 #define   PIPECONF_INTERLACE_W_FIELD_INDICATION        (6 << 21)
 #define   PIPECONF_INTERLACE_FIELD_0_ONLY              (7 << 21)
+#define   PIPECONF_INTERLACE_MASK      (7 << 21)
 #define   PIPECONF_CXSR_DOWNCLOCK      (1<<16)
 #define   PIPECONF_BPP_MASK    (0x000000e0)
 #define   PIPECONF_BPP_8       (0<<5)
 #define WM3_LP_ILK             0x45110
 #define  WM3_LP_EN             (1<<31)
 #define WM1S_LP_ILK            0x45120
+#define WM2S_LP_IVB            0x45124
+#define WM3S_LP_IVB            0x45128
 #define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define _DSPBSURF              0x7119C
 #define _DSPBTILEOFF           0x711A4
 
+/* Sprite A control */
+#define _DVSACNTR              0x72180
+#define   DVS_ENABLE           (1<<31)
+#define   DVS_GAMMA_ENABLE     (1<<30)
+#define   DVS_PIXFORMAT_MASK   (3<<25)
+#define   DVS_FORMAT_YUV422    (0<<25)
+#define   DVS_FORMAT_RGBX101010        (1<<25)
+#define   DVS_FORMAT_RGBX888   (2<<25)
+#define   DVS_FORMAT_RGBX161616        (3<<25)
+#define   DVS_SOURCE_KEY       (1<<22)
+#define   DVS_RGB_ORDER_RGBX   (1<<20)
+#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define   DVS_YUV_ORDER_YUYV   (0<<16)
+#define   DVS_YUV_ORDER_UYVY   (1<<16)
+#define   DVS_YUV_ORDER_YVYU   (2<<16)
+#define   DVS_YUV_ORDER_VYUY   (3<<16)
+#define   DVS_DEST_KEY         (1<<2)
+#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define   DVS_TILED            (1<<10)
+#define _DVSALINOFF            0x72184
+#define _DVSASTRIDE            0x72188
+#define _DVSAPOS               0x7218c
+#define _DVSASIZE              0x72190
+#define _DVSAKEYVAL            0x72194
+#define _DVSAKEYMSK            0x72198
+#define _DVSASURF              0x7219c
+#define _DVSAKEYMAXVAL         0x721a0
+#define _DVSATILEOFF           0x721a4
+#define _DVSASURFLIVE          0x721ac
+#define _DVSASCALE             0x72204
+#define   DVS_SCALE_ENABLE     (1<<31)
+#define   DVS_FILTER_MASK      (3<<29)
+#define   DVS_FILTER_MEDIUM    (0<<29)
+#define   DVS_FILTER_ENHANCING (1<<29)
+#define   DVS_FILTER_SOFTENING (2<<29)
+#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC              0x72300
+
+#define _DVSBCNTR              0x73180
+#define _DVSBLINOFF            0x73184
+#define _DVSBSTRIDE            0x73188
+#define _DVSBPOS               0x7318c
+#define _DVSBSIZE              0x73190
+#define _DVSBKEYVAL            0x73194
+#define _DVSBKEYMSK            0x73198
+#define _DVSBSURF              0x7319c
+#define _DVSBKEYMAXVAL         0x731a0
+#define _DVSBTILEOFF           0x731a4
+#define _DVSBSURFLIVE          0x731ac
+#define _DVSBSCALE             0x73204
+#define _DVSBGAMC              0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL              0x70280
+#define   SPRITE_ENABLE                        (1<<31)
+#define   SPRITE_GAMMA_ENABLE          (1<<30)
+#define   SPRITE_PIXFORMAT_MASK                (7<<25)
+#define   SPRITE_FORMAT_YUV422         (0<<25)
+#define   SPRITE_FORMAT_RGBX101010     (1<<25)
+#define   SPRITE_FORMAT_RGBX888                (2<<25)
+#define   SPRITE_FORMAT_RGBX161616     (3<<25)
+#define   SPRITE_FORMAT_YUV444         (4<<25)
+#define   SPRITE_FORMAT_XR_BGR101010   (5<<25) /* Extended range */
+#define   SPRITE_CSC_ENABLE            (1<<24)
+#define   SPRITE_SOURCE_KEY            (1<<22)
+#define   SPRITE_RGB_ORDER_RGBX                (1<<20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1<<19)
+#define   SPRITE_YUV_CSC_FORMAT_BT709  (1<<18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK   (3<<16)
+#define   SPRITE_YUV_ORDER_YUYV                (0<<16)
+#define   SPRITE_YUV_ORDER_UYVY                (1<<16)
+#define   SPRITE_YUV_ORDER_YVYU                (2<<16)
+#define   SPRITE_YUV_ORDER_VYUY                (3<<16)
+#define   SPRITE_TRICKLE_FEED_DISABLE  (1<<14)
+#define   SPRITE_INT_GAMMA_ENABLE      (1<<13)
+#define   SPRITE_TILED                 (1<<10)
+#define   SPRITE_DEST_KEY              (1<<2)
+#define _SPRA_LINOFF           0x70284
+#define _SPRA_STRIDE           0x70288
+#define _SPRA_POS              0x7028c
+#define _SPRA_SIZE             0x70290
+#define _SPRA_KEYVAL           0x70294
+#define _SPRA_KEYMSK           0x70298
+#define _SPRA_SURF             0x7029c
+#define _SPRA_KEYMAX           0x702a0
+#define _SPRA_TILEOFF          0x702a4
+#define _SPRA_SCALE            0x70304
+#define   SPRITE_SCALE_ENABLE  (1<<31)
+#define   SPRITE_FILTER_MASK   (3<<29)
+#define   SPRITE_FILTER_MEDIUM (0<<29)
+#define   SPRITE_FILTER_ENHANCING      (1<<29)
+#define   SPRITE_FILTER_SOFTENING      (2<<29)
+#define   SPRITE_VERTICAL_OFFSET_HALF  (1<<28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1<<27)
+#define _SPRA_GAMC             0x70400
+
+#define _SPRB_CTL              0x71280
+#define _SPRB_LINOFF           0x71284
+#define _SPRB_STRIDE           0x71288
+#define _SPRB_POS              0x7128c
+#define _SPRB_SIZE             0x71290
+#define _SPRB_KEYVAL           0x71294
+#define _SPRB_KEYMSK           0x71298
+#define _SPRB_SURF             0x7129c
+#define _SPRB_KEYMAX           0x712a0
+#define _SPRB_TILEOFF          0x712a4
+#define _SPRB_SCALE            0x71304
+#define _SPRB_GAMC             0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 # define VGA_DISP_DISABLE                      (1 << 31)
 #define   ILK_DPFC_DIS1                (1<<8)
 #define   ILK_DPFC_DIS2                (1<<9)
 
+#define IVB_CHICKEN3   0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE     (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE                (1 << 2)
+
 #define DISP_ARB_CTL   0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
 #define   GEN6_CAGF_MASK                       (0x7f << GEN6_CAGF_SHIFT)
 #define GEN6_RP_CONTROL                                0xA024
 #define   GEN6_RP_MEDIA_TURBO                  (1<<11)
-#define   GEN6_RP_USE_NORMAL_FREQ              (1<<9)
+#define   GEN6_RP_MEDIA_MODE_MASK              (3<<9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3<<9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2<<9)
+#define   GEN6_RP_MEDIA_HW_MODE                        (1<<9)
+#define   GEN6_RP_MEDIA_SW_MODE                        (0<<9)
 #define   GEN6_RP_MEDIA_IS_GFX                 (1<<8)
 #define   GEN6_RP_ENABLE                       (1<<7)
 #define   GEN6_RP_UP_IDLE_MIN                  (0x1<<3)
 #define GEN6_PCODE_DATA                                0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 
+#define GEN6_GT_CORE_STATUS            0x138060
+#define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
+#define   GEN6_RCn_MASK                        7
+#define   GEN6_RC0                     0
+#define   GEN6_RC3                     2
+#define   GEN6_RC6                     3
+#define   GEN6_RC7                     4
+
 #define G4X_AUD_VID_DID                        0x62020
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
 #define G4X_ELD_ACK                    (1 << 4)
 #define G4X_HDMIW_HDMIEDID             0x6210C
 
-#define GEN5_HDMIW_HDMIEDID_A          0xE2050
-#define GEN5_AUD_CNTL_ST_A             0xE20B4
-#define GEN5_ELD_BUFFER_SIZE           (0x1f << 10)
-#define GEN5_ELD_ADDRESS               (0x1f << 5)
-#define GEN5_ELD_ACK                   (1 << 4)
-#define GEN5_AUD_CNTL_ST2              0xE20C0
-#define GEN5_ELD_VALIDB                        (1 << 0)
-#define GEN5_CP_READYB                 (1 << 1)
-
-#define GEN7_HDMIW_HDMIEDID_A          0xE5050
-#define GEN7_AUD_CNTRL_ST_A            0xE50B4
-#define GEN7_AUD_CNTRL_ST2             0xE50C0
+#define IBX_HDMIW_HDMIEDID_A           0xE2050
+#define IBX_AUD_CNTL_ST_A              0xE20B4
+#define IBX_ELD_BUFFER_SIZE            (0x1f << 10)
+#define IBX_ELD_ADDRESS                        (0x1f << 5)
+#define IBX_ELD_ACK                    (1 << 4)
+#define IBX_AUD_CNTL_ST2               0xE20C0
+#define IBX_ELD_VALIDB                 (1 << 0)
+#define IBX_CP_READYB                  (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A           0xE5050
+#define CPT_AUD_CNTL_ST_A              0xE50B4
+#define CPT_AUD_CNTRL_ST2              0xE50C0
+
+/* These are the 4 32-bit write offset registers for each stream
+ * output buffer.  It determines the offset from the
+ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n)                (0x5280 + (n) * 4)
 
 #endif /* _I915_REG_H_ */
index daa5743ccbd63ad87a45697bf49b13fb5538d9e1..2a3f707caab8cc39b91e065071b60244da4d8ba6 100644 (file)
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
             pipe_name(pipe));
 }
 
-static void assert_pipe(struct drm_i915_private *dev_priv,
-                       enum pipe pipe, bool state)
+void assert_pipe(struct drm_i915_private *dev_priv,
+                enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
 }
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
 static void assert_plane_enabled(struct drm_i915_private *dev_priv,
                                 enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
 {
        int reg;
-       u32 val;
+       u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+               pll_sel = TRANSC_DPLL_ENABLE;
 
        if (pipe > 1)
                return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
        /* Make sure transcoder isn't still depending on us */
        assert_transcoder_disabled(dev_priv, pipe);
 
+       if (pipe == 0)
+               pll_sel |= TRANSC_DPLLA_SEL;
+       else if (pipe == 1)
+               pll_sel |= TRANSC_DPLLB_SEL;
+
+
+       if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+               return;
+
        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        u32 fbc_ctl, fbc_ctl2;
 
        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-       if (fb->pitch < cfb_pitch)
-               cfb_pitch = fb->pitch;
+       if (fb->pitches[0] < cfb_pitch)
+               cfb_pitch = fb->pitches[0];
 
        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        I915_WRITE(reg, dspcntr);
 
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPSURF(plane), Start);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
        I915_WRITE(reg, dspcntr);
 
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
         */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+void sandybridge_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
-       if (!single_plane_enabled(enabled))
+       if (!single_plane_enabled(enabled) ||
+           dev_priv->sprite_scaling_enabled)
                return;
        enabled = ffs(enabled) - 1;
 
@@ -4619,6 +4628,149 @@ static void sandybridge_update_wm(struct drm_device *dev)
                   cursor_wm);
 }
 
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+                             uint32_t sprite_width, int pixel_size,
+                             const struct intel_watermark_params *display,
+                             int display_latency_ns, int *sprite_wm)
+{
+       struct drm_crtc *crtc;
+       int clock;
+       int entries, tlb_miss;
+
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       if (crtc->fb == NULL || !crtc->enabled) {
+               *sprite_wm = display->guard_size;
+               return false;
+       }
+
+       clock = crtc->mode.clock;
+
+       /* Use the small buffer method to calculate the sprite watermark */
+       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+       tlb_miss = display->fifo_size*display->cacheline_size -
+               sprite_width * 8;
+       if (tlb_miss > 0)
+               entries += tlb_miss;
+       entries = DIV_ROUND_UP(entries, display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+       if (*sprite_wm > (int)display->max_wm)
+               *sprite_wm = display->max_wm;
+
+       return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+                               uint32_t sprite_width, int pixel_size,
+                               const struct intel_watermark_params *display,
+                               int latency_ns, int *sprite_wm)
+{
+       struct drm_crtc *crtc;
+       unsigned long line_time_us;
+       int clock;
+       int line_count, line_size;
+       int small, large;
+       int entries;
+
+       if (!latency_ns) {
+               *sprite_wm = 0;
+               return false;
+       }
+
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       clock = crtc->mode.clock;
+
+       line_time_us = (sprite_width * 1000) / clock;
+       line_count = (latency_ns / line_time_us + 1000) / 1000;
+       line_size = sprite_width * pixel_size;
+
+       /* Use the minimum of the small and large buffer method for primary */
+       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+       large = line_count * line_size;
+
+       entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+
+       return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+                                        uint32_t sprite_width, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int sprite_wm, reg;
+       int ret;
+
+       switch (pipe) {
+       case 0:
+               reg = WM0_PIPEA_ILK;
+               break;
+       case 1:
+               reg = WM0_PIPEB_ILK;
+               break;
+       case 2:
+               reg = WM0_PIPEC_IVB;
+               break;
+       default:
+               return; /* bad pipe */
+       }
+
+       ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+                                           &sandybridge_display_wm_info,
+                                           latency, &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+                             pipe);
+               return;
+       }
+
+       I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+       DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM1_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+       /* Only IVB has two more LP watermarks for sprite */
+       if (!IS_IVYBRIDGE(dev))
+               return;
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM2_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM3_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -4659,6 +4811,16 @@ static void intel_update_watermarks(struct drm_device *dev)
                dev_priv->display.update_wm(dev);
 }
 
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+                                   uint32_t sprite_width, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->display.update_sprite_wm)
+               dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+                                                  pixel_size);
+}
+
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
        if (i915_panel_use_ssc >= 0)
@@ -5155,7 +5317,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
-               pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+               pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
 
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
@@ -5822,14 +5984,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
        ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
                                              x, y, old_fb);
-
        drm_vblank_post_modeset(dev, pipe);
 
-       intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+       if (ret)
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       else
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
 
        return ret;
 }
 
+static bool intel_eld_uptodate(struct drm_connector *connector,
+                              int reg_eldv, uint32_t bits_eldv,
+                              int reg_elda, uint32_t bits_elda,
+                              int reg_edid)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       uint8_t *eld = connector->eld;
+       uint32_t i;
+
+       i = I915_READ(reg_eldv);
+       i &= bits_eldv;
+
+       if (!eld[0])
+               return !i;
+
+       if (!i)
+               return false;
+
+       i = I915_READ(reg_elda);
+       i &= ~bits_elda;
+       I915_WRITE(reg_elda, i);
+
+       for (i = 0; i < eld[2]; i++)
+               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+                       return false;
+
+       return true;
+}
+
 static void g4x_write_eld(struct drm_connector *connector,
                          struct drm_crtc *crtc)
 {
@@ -5846,6 +6039,12 @@ static void g4x_write_eld(struct drm_connector *connector,
        else
                eldv = G4X_ELDV_DEVCTG;
 
+       if (intel_eld_uptodate(connector,
+                              G4X_AUD_CNTL_ST, eldv,
+                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+                              G4X_HDMIW_HDMIEDID))
+               return;
+
        i = I915_READ(G4X_AUD_CNTL_ST);
        i &= ~(eldv | G4X_ELD_ADDR);
        len = (i >> 9) & 0x1f;          /* ELD buffer size */
@@ -5876,14 +6075,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
        int aud_cntl_st;
        int aud_cntrl_st2;
 
-       if (IS_IVYBRIDGE(connector->dev)) {
-               hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
-               aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+       if (HAS_PCH_IBX(connector->dev)) {
+               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+               aud_cntl_st = IBX_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else {
-               hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN5_AUD_CNTL_ST_A;
-               aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+               aud_cntl_st = CPT_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
 
        i = to_intel_crtc(crtc)->pipe;
@@ -5897,14 +6096,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
        if (!i) {
                DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
                /* operate blindly on all ports */
-               eldv = GEN5_ELD_VALIDB;
-               eldv |= GEN5_ELD_VALIDB << 4;
-               eldv |= GEN5_ELD_VALIDB << 8;
+               eldv = IBX_ELD_VALIDB;
+               eldv |= IBX_ELD_VALIDB << 4;
+               eldv |= IBX_ELD_VALIDB << 8;
        } else {
                DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
-               eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+               eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+       }
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
        }
 
+       if (intel_eld_uptodate(connector,
+                              aud_cntrl_st2, eldv,
+                              aud_cntl_st, IBX_ELD_ADDRESS,
+                              hdmiw_hdmiedid))
+               return;
+
        i = I915_READ(aud_cntrl_st2);
        i &= ~eldv;
        I915_WRITE(aud_cntrl_st2, i);
@@ -5912,13 +6122,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
        if (!eld[0])
                return;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-       }
-
        i = I915_READ(aud_cntl_st);
-       i &= ~GEN5_ELD_ADDRESS;
+       i &= ~IBX_ELD_ADDRESS;
        I915_WRITE(aud_cntl_st, i);
 
        len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
@@ -6298,7 +6503,7 @@ static struct drm_display_mode load_detect_mode = {
 
 static struct drm_framebuffer *
 intel_framebuffer_create(struct drm_device *dev,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct drm_i915_gem_object *obj)
 {
        struct intel_framebuffer *intel_fb;
@@ -6340,7 +6545,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
                                  int depth, int bpp)
 {
        struct drm_i915_gem_object *obj;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
 
        obj = i915_gem_alloc_object(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
@@ -6349,9 +6554,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 
        mode_cmd.width = mode->hdisplay;
        mode_cmd.height = mode->vdisplay;
-       mode_cmd.depth = depth;
-       mode_cmd.bpp = bpp;
-       mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+       mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+                                                               bpp);
+       mode_cmd.pixel_format = 0;
 
        return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
@@ -6372,11 +6577,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
                return NULL;
 
        fb = &dev_priv->fbdev->ifb.base;
-       if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
-                                                         fb->bits_per_pixel))
+       if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+                                                              fb->bits_per_pixel))
                return NULL;
 
-       if (obj->base.size < mode->vdisplay * fb->pitch)
+       if (obj->base.size < mode->vdisplay * fb->pitches[0])
                return NULL;
 
        return fb;
@@ -7009,7 +7214,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                goto out;
 
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
        ret = BEGIN_LP_RING(6);
        if (ret)
@@ -7026,7 +7231,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();
@@ -7050,7 +7255,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                goto out;
 
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
        ret = BEGIN_LP_RING(6);
        if (ret)
@@ -7064,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP_I915 |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
 
@@ -7097,7 +7302,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
         */
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
        /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7132,7 +7337,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch | obj->tiling_mode);
+       OUT_RING(fb->pitches[0] | obj->tiling_mode);
        OUT_RING(obj->gtt_offset);
 
        pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7168,7 +7373,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                goto out;
 
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
-       intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+       intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
        intel_ring_advance(ring);
@@ -7594,7 +7799,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 
 int intel_framebuffer_init(struct drm_device *dev,
                           struct intel_framebuffer *intel_fb,
-                          struct drm_mode_fb_cmd *mode_cmd,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj)
 {
        int ret;
@@ -7602,21 +7807,25 @@ int intel_framebuffer_init(struct drm_device *dev,
        if (obj->tiling_mode == I915_TILING_Y)
                return -EINVAL;
 
-       if (mode_cmd->pitch & 63)
+       if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
 
-       switch (mode_cmd->bpp) {
-       case 8:
-       case 16:
-               /* Only pre-ILK can handle 5:5:5 */
-               if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
-                       return -EINVAL;
+       switch (mode_cmd->pixel_format) {
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               /* RGB formats are common across chipsets */
                break;
-
-       case 24:
-       case 32:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_VYUY:
                break;
        default:
+               DRM_ERROR("unsupported pixel format\n");
                return -EINVAL;
        }
 
@@ -7634,11 +7843,12 @@ int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
-                             struct drm_mode_fb_cmd *mode_cmd)
+                             struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+                                               mode_cmd->handles[0]));
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);
 
@@ -7995,7 +8205,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_USE_NORMAL_FREQ |
+                  GEN6_RP_MEDIA_HW_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@ -8250,6 +8460,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
        I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
        for_each_pipe(pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
@@ -8543,9 +8757,15 @@ static void intel_init_display(struct drm_device *dev)
                if (IS_IVYBRIDGE(dev)) {
                        u32     ecobus;
 
+                       /* A small trick here - if the bios hasn't configured MT forcewake,
+                        * and if the device is in RC6, then force_wake_mt_get will not wake
+                        * the device and the ECOBUS read will return zero. Which will be
+                        * (correctly) interpreted by the test below as MT forcewake being
+                        * disabled.
+                        */
                        mutex_lock(&dev->struct_mutex);
                        __gen6_gt_force_wake_mt_get(dev_priv);
-                       ecobus = I915_READ(ECOBUS);
+                       ecobus = I915_READ_NOTRACE(ECOBUS);
                        __gen6_gt_force_wake_mt_put(dev_priv);
                        mutex_unlock(&dev->struct_mutex);
 
@@ -8577,6 +8797,7 @@ static void intel_init_display(struct drm_device *dev)
                } else if (IS_GEN6(dev)) {
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
@@ -8590,6 +8811,7 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
@@ -8773,7 +8995,7 @@ static void i915_disable_vga(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
 
        drm_mode_config_init(dev);
 
@@ -8803,6 +9025,12 @@ void intel_modeset_init(struct drm_device *dev)
 
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
+               if (HAS_PCH_SPLIT(dev)) {
+                       ret = intel_plane_init(dev, i);
+                       if (ret)
+                               DRM_ERROR("plane %d init failed: %d\n",
+                                         i, ret);
+               }
        }
 
        /* Just disable it once at startup */
index 92b041b66e491cbd7775e58458f03fe9b7eee303..db3b461ad4124babfa70f931a584cd433fb788ab 100644 (file)
@@ -1926,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
        }
 
+       DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
        msleep(intel_dp->panel_power_down_delay);
index a1b4343814e8faac807384057ba51a541746b6bd..1348705faf6bfbe5abf4caf0718107fa9ff85ee3 100644 (file)
@@ -26,6 +26,7 @@
 #define __INTEL_DRV_H__
 
 #include <linux/i2c.h>
+#include "i915_drm.h"
 #include "i915_drv.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
-               if (W && !(in_atomic() || in_dbg_master())) msleep(W);  \
+               if (W && drm_can_sleep()) msleep(W);    \
        }                                                               \
        ret__;                                                          \
 })
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
 
-#define MSLEEP(x) do { \
-       if (in_dbg_master()) \
-               mdelay(x); \
-       else \
-               msleep(x); \
-} while (0)
-
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
 
@@ -177,10 +171,32 @@ struct intel_crtc {
        bool use_pll_a;
 };
 
+struct intel_plane {
+       struct drm_plane base;
+       enum pipe pipe;
+       struct drm_i915_gem_object *obj;
+       bool primary_disabled;
+       int max_downscale;
+       u32 lut_r[1024], lut_g[1024], lut_b[1024];
+       void (*update_plane)(struct drm_plane *plane,
+                            struct drm_framebuffer *fb,
+                            struct drm_i915_gem_object *obj,
+                            int crtc_x, int crtc_y,
+                            unsigned int crtc_w, unsigned int crtc_h,
+                            uint32_t x, uint32_t y,
+                            uint32_t src_w, uint32_t src_h);
+       void (*disable_plane)(struct drm_plane *plane);
+       int (*update_colorkey)(struct drm_plane *plane,
+                              struct drm_intel_sprite_colorkey *key);
+       void (*get_colorkey)(struct drm_plane *plane,
+                            struct drm_intel_sprite_colorkey *key);
+};
+
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
 
 #define DIP_HEADER_SIZE        5
 
@@ -290,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -360,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
-                                 struct drm_mode_fb_cmd *mode_cmd,
+                                 struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -380,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
 
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+                       bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
                            struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
 
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+                                          uint32_t sprite_width,
+                                          int pixel_size);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+
 #endif /* __INTEL_DRV_H__ */
index ec49bae7338260d77cc6b78f9bc15d948303c1e8..571375a3eef4cb69e446ce5fa078cfbb28e49bae 100644 (file)
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct fb_info *info;
        struct drm_framebuffer *fb;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_i915_gem_object *obj;
        struct device *device = &dev->pdev->dev;
        int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+                                                     8), 64);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
        obj = i915_gem_alloc_object(dev, size);
        if (!obj) {
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 //     memset(info->screen_base, 0, size);
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
        info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
 {
        int ret;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_plane *plane;
 
        ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
        if (ret)
                DRM_DEBUG("failed to restore crtc mode\n");
+
+       /* Be sure to shut off any planes that may be active */
+       list_for_each_entry(plane, &config->plane_list, head)
+               plane->funcs->disable_plane(plane);
 }
index d4f5a0b2120d05b40a396695653594d2064ba15f..64541f7ef90069a07f073980bb17720af60bb3b3 100644 (file)
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        u32 temp;
+       u32 enable_bits = SDVO_ENABLE;
+
+       if (intel_hdmi->has_audio)
+               enable_bits |= SDVO_AUDIO_ENABLE;
 
        temp = I915_READ(intel_hdmi->sdvox_reg);
 
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        }
 
        if (mode != DRM_MODE_DPMS_ON) {
-               temp &= ~SDVO_ENABLE;
+               temp &= ~enable_bits;
        } else {
-               temp |= SDVO_ENABLE;
+               temp |= enable_bits;
        }
 
        I915_WRITE(intel_hdmi->sdvox_reg, temp);
index ca70e2f1044517425ce3ad0ea3ab85db7c0df5ee..77e729d4e4f02476b289aed344d344411c0eb2c1 100644 (file)
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                        return ret;
        }
 
+       if (INTEL_INFO(dev)->gen >= 6) {
+               I915_WRITE(INSTPM,
+                          INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+       }
+
        return ret;
 }
 
@@ -786,6 +791,17 @@ ring_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
+static bool
+gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       /* The BLT ring on IVB appears to have broken synchronization
+        * between the seqno write and the interrupt, so that the
+        * interrupt appears first.  Returning false here makes
+        * i915_wait_request() do a polling loop, instead.
+        */
+       return false;
+}
+
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
@@ -1119,7 +1135,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
        }
 
        trace_i915_ring_wait_begin(ring);
-       end = jiffies + 3 * HZ;
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               /* With GEM the hangcheck timer should kick us out of the loop,
+                * leaving it early runs the risk of corrupting GEM state (due
+                * to running on almost untested codepaths). But on resume
+                * timers don't work yet, so prevent a complete hang in that
+                * case by choosing an insanely large timeout. */
+               end = jiffies + 60 * HZ;
+       else
+               end = jiffies + 3 * HZ;
+
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
@@ -1552,5 +1577,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
        *ring = gen6_blt_ring;
 
+       if (IS_GEN7(dev))
+               ring->irq_get = gen7_blt_ring_get_irq;
+
        return intel_init_ring_buffer(dev, ring);
 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644 (file)
index 0000000..d13989f
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+       u32 sprctl, sprscale = 0;
+       int pixel_size;
+
+       sprctl = I915_READ(SPRCTL(pipe));
+
+       /* Mask out pixel format bits in case we change it */
+       sprctl &= ~SPRITE_PIXFORMAT_MASK;
+       sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+       sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_XBGR8888:
+               sprctl |= SPRITE_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_YUYV:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_YVYU:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_UYVY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_VYUY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+               pixel_size = 2;
+               break;
+       default:
+               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+               sprctl |= DVS_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       }
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               sprctl |= SPRITE_TILED;
+
+       /* must disable */
+       sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+       sprctl |= SPRITE_ENABLE;
+       sprctl |= SPRITE_DEST_KEY;
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+       /*
+        * IVB workaround: must disable low power watermarks for at least
+        * one frame before enabling scaling.  LP watermarks can be re-enabled
+        * when scaling is disabled.
+        */
+       if (crtc_w != src_w || crtc_h != src_h) {
+               dev_priv->sprite_scaling_enabled = true;
+               sandybridge_update_wm(dev);
+               intel_wait_for_vblank(dev, pipe);
+               sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+       } else {
+               dev_priv->sprite_scaling_enabled = false;
+               /* potentially re-enable LP watermarks */
+               sandybridge_update_wm(dev);
+       }
+
+       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+       if (obj->tiling_mode != I915_TILING_NONE) {
+               I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+       } else {
+               unsigned long offset;
+
+               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+               I915_WRITE(SPRLINOFF(pipe), offset);
+       }
+       I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+       I915_WRITE(SPRSCALE(pipe), sprscale);
+       I915_WRITE(SPRCTL(pipe), sprctl);
+       I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+       POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+
+       I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+       /* Can't leave the scaler enabled... */
+       I915_WRITE(SPRSCALE(pipe), 0);
+       /* Activate double buffered register update */
+       I915_WRITE(SPRSURF(pipe), 0);
+       POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 sprctl;
+       int ret = 0;
+
+       intel_plane = to_intel_plane(plane);
+
+       I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+       I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+       I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+       sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+       sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               sprctl |= SPRITE_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               sprctl |= SPRITE_SOURCE_KEY;
+       I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+       POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+       return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 sprctl;
+
+       intel_plane = to_intel_plane(plane);
+
+       key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+       key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+       key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+       key->flags = 0;
+
+       sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+       if (sprctl & SPRITE_DEST_KEY)
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+       else if (sprctl & SPRITE_SOURCE_KEY)
+               key->flags = I915_SET_COLORKEY_SOURCE;
+       else
+               key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe, pixel_size;
+       u32 dvscntr, dvsscale = 0;
+
+       dvscntr = I915_READ(DVSCNTR(pipe));
+
+       /* Mask out pixel format bits in case we change it */
+       dvscntr &= ~DVS_PIXFORMAT_MASK;
+       dvscntr &= ~DVS_RGB_ORDER_RGBX;
+       dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_XBGR8888:
+               dvscntr |= DVS_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_YUYV:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_YVYU:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_UYVY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_VYUY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+               pixel_size = 2;
+               break;
+       default:
+               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+               dvscntr |= DVS_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       }
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               dvscntr |= DVS_TILED;
+
+       /* must disable */
+       dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+       dvscntr |= DVS_ENABLE;
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+       if (crtc_w != src_w || crtc_h != src_h)
+               dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+       if (obj->tiling_mode != I915_TILING_NONE) {
+               I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+       } else {
+               unsigned long offset;
+
+               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+               I915_WRITE(DVSLINOFF(pipe), offset);
+       }
+       I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+       I915_WRITE(DVSSCALE(pipe), dvsscale);
+       I915_WRITE(DVSCNTR(pipe), dvscntr);
+       I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+       POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+
+       I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+       /* Disable the scaler */
+       I915_WRITE(DVSSCALE(pipe), 0);
+       /* Flush double buffered register updates */
+       I915_WRITE(DVSSURF(pipe), 0);
+       POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int reg = DSPCNTR(intel_crtc->plane);
+
+       I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int reg = DSPCNTR(intel_crtc->plane);
+
+       I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 dvscntr;
+       int ret = 0;
+
+       intel_plane = to_intel_plane(plane);
+
+       I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+       I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+       I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+       dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+       dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               dvscntr |= DVS_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               dvscntr |= DVS_SOURCE_KEY;
+       I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+       POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+       return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 dvscntr;
+
+       intel_plane = to_intel_plane(plane);
+
+       key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+       key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+       key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+       key->flags = 0;
+
+       dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+       if (dvscntr & DVS_DEST_KEY)
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+       else if (dvscntr & DVS_SOURCE_KEY)
+               key->flags = I915_SET_COLORKEY_SOURCE;
+       else
+               key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                  unsigned int crtc_w, unsigned int crtc_h,
+                  uint32_t src_x, uint32_t src_y,
+                  uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj, *old_obj;
+       int pipe = intel_plane->pipe;
+       int ret = 0;
+       int x = src_x >> 16, y = src_y >> 16;
+       int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+       bool disable_primary = false;
+
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+
+       old_obj = intel_plane->obj;
+
+       /* Pipe must be running... */
+       if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+               return -EINVAL;
+
+       if (crtc_x >= primary_w || crtc_y >= primary_h)
+               return -EINVAL;
+
+       /* Don't modify another pipe's plane */
+       if (intel_plane->pipe != intel_crtc->pipe)
+               return -EINVAL;
+
+       /*
+        * Clamp the width & height into the visible area.  Note we don't
+        * try to scale the source if part of the visible region is offscreen.
+        * The caller must handle that by adjusting source offset and size.
+        */
+       if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+               crtc_w += crtc_x;
+               crtc_x = 0;
+       }
+       if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+               goto out;
+       if ((crtc_x + crtc_w) > primary_w)
+               crtc_w = primary_w - crtc_x;
+
+       if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+               crtc_h += crtc_y;
+               crtc_y = 0;
+       }
+       if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+               goto out;
+       if (crtc_y + crtc_h > primary_h)
+               crtc_h = primary_h - crtc_y;
+
+       if (!crtc_w || !crtc_h) /* Again, nothing to display */
+               goto out;
+
+       /*
+        * We can take a larger source and scale it down, but
+        * only so much...  16x is the max on SNB.
+        */
+       if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+               return -EINVAL;
+
+       /*
+        * If the sprite is completely covering the primary plane,
+        * we can disable the primary and save power.
+        */
+       if ((crtc_x == 0) && (crtc_y == 0) &&
+           (crtc_w == primary_w) && (crtc_h == primary_h))
+               disable_primary = true;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+       if (ret) {
+               DRM_ERROR("failed to pin object\n");
+               goto out_unlock;
+       }
+
+       intel_plane->obj = obj;
+
+       /*
+        * Be sure to re-enable the primary before the sprite is no longer
+        * covering it fully.
+        */
+       if (!disable_primary && intel_plane->primary_disabled) {
+               intel_enable_primary(crtc);
+               intel_plane->primary_disabled = false;
+       }
+
+       intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+                                 crtc_w, crtc_h, x, y, src_w, src_h);
+
+       if (disable_primary) {
+               intel_disable_primary(crtc);
+               intel_plane->primary_disabled = true;
+       }
+
+       /* Unpin old obj after new one is active to avoid ugliness */
+       if (old_obj) {
+               /*
+                * It's fairly common to simply update the position of
+                * an existing object.  In that case, we don't need to
+                * wait for vblank to avoid ugliness, we only need to
+                * do the pin & ref bookkeeping.
+                */
+               if (old_obj != obj) {
+                       mutex_unlock(&dev->struct_mutex);
+                       intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+                       mutex_lock(&dev->struct_mutex);
+               }
+               i915_gem_object_unpin(old_obj);
+       }
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+out:
+       return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int ret = 0;
+
+       if (intel_plane->primary_disabled) {
+               intel_enable_primary(plane->crtc);
+               intel_plane->primary_disabled = false;
+       }
+
+       intel_plane->disable_plane(plane);
+
+       if (!intel_plane->obj)
+               goto out;
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_object_unpin(intel_plane->obj);
+       intel_plane->obj = NULL;
+       mutex_unlock(&dev->struct_mutex);
+out:
+
+       return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       intel_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_intel_sprite_colorkey *set = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct intel_plane *intel_plane;
+       int ret = 0;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       /* Make sure we don't try to enable both src & dest simultaneously */
+       if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       plane = obj_to_plane(obj);
+       intel_plane = to_intel_plane(plane);
+       ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_intel_sprite_colorkey *get = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct intel_plane *intel_plane;
+       int ret = 0;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       plane = obj_to_plane(obj);
+       intel_plane = to_intel_plane(plane);
+       intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+       .update_plane = intel_update_plane,
+       .disable_plane = intel_disable_plane,
+       .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+       struct intel_plane *intel_plane;
+       unsigned long possible_crtcs;
+       int ret;
+
+       if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+               DRM_ERROR("new plane code only for SNB+\n");
+               return -ENODEV;
+       }
+
+       intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+       if (!intel_plane)
+               return -ENOMEM;
+
+       if (IS_GEN6(dev)) {
+               intel_plane->max_downscale = 16;
+               intel_plane->update_plane = snb_update_plane;
+               intel_plane->disable_plane = snb_disable_plane;
+               intel_plane->update_colorkey = snb_update_colorkey;
+               intel_plane->get_colorkey = snb_get_colorkey;
+       } else if (IS_GEN7(dev)) {
+               intel_plane->max_downscale = 2;
+               intel_plane->update_plane = ivb_update_plane;
+               intel_plane->disable_plane = ivb_disable_plane;
+               intel_plane->update_colorkey = ivb_update_colorkey;
+               intel_plane->get_colorkey = ivb_get_colorkey;
+       }
+
+       intel_plane->pipe = pipe;
+       possible_crtcs = (1 << pipe);
+       ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+                            &intel_plane_funcs, snb_plane_formats,
+                            ARRAY_SIZE(snb_plane_formats), false);
+       if (ret)
+               kfree(intel_plane);
+
+       return ret;
+}
+
index 33daa29eea6652e0afa9888cecb840f2eda1e9bd..f9a925d5881966d7f88acd66f065be29976f3659 100644 (file)
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
        mga_PCI_IDS
 };
 
+static const struct file_operations mga_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = mga_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = mga_ioctls,
        .dma_ioctl = mga_dma_buffers,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-               .compat_ioctl = mga_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
+       .fops = &mga_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 35ef5b1e3566cf60ee9693f8c4aab9764dbb6478..9f27e3d9e69a9aa32bc4d6c9470861075dbff6ba 100644 (file)
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o nouveau_ramht.o \
+             nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
             nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
-            nouveau_mm.o nouveau_vm.o \
+            nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
-             nv84_crypt.o \
+             nv84_crypt.o nv98_crypt.o \
              nva3_copy.o nvc0_copy.o \
              nv31_mpeg.o nv50_mpeg.o \
+             nv84_bsp.o \
+             nv84_vp.o \
+             nv98_ppp.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o \
index 5fc201b49d3070721cade7821c3d259f15b5f441..e5cbead85e50b602d1950ccbc670a34fe1a303b0 100644 (file)
@@ -27,6 +27,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
 
 #include <linux/io-mapping.h>
 
@@ -34,9 +35,6 @@
 #define NV_CIO_CRE_44_HEADA 0x0
 #define NV_CIO_CRE_44_HEADB 0x3
 #define FEATURE_MOBILE 0x10    /* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
 
 #define EDID1_LEN 128
 
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
        return dcb_entry;
 }
 
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
-       uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
-       int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
-       int recordoffset = 0, rdofs = 1, wrofs = 0;
-       uint8_t port_type = 0;
-
-       if (!i2ctable)
-               return -EINVAL;
-
-       if (dcb_version >= 0x30) {
-               if (i2ctable[0] != dcb_version) /* necessary? */
-                       NV_WARN(dev,
-                               "DCB I2C table version mismatch (%02X vs %02X)\n",
-                               i2ctable[0], dcb_version);
-               dcb_i2c_ver = i2ctable[0];
-               headerlen = i2ctable[1];
-               if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
-                       i2c_entries = i2ctable[2];
-               else
-                       NV_WARN(dev,
-                               "DCB I2C table has more entries than indexable "
-                               "(%d entries, max %d)\n", i2ctable[2],
-                               DCB_MAX_NUM_I2C_ENTRIES);
-               entry_len = i2ctable[3];
-               /* [4] is i2c_default_indices, read in parse_dcb_table() */
-       }
-       /*
-        * It's your own fault if you call this function on a DCB 1.1 BIOS --
-        * the test below is for DCB 1.2
-        */
-       if (dcb_version < 0x14) {
-               recordoffset = 2;
-               rdofs = 0;
-               wrofs = 1;
-       }
-
-       if (index == 0xf)
-               return 0;
-       if (index >= i2c_entries) {
-               NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
-                        index, i2ctable[2]);
-               return -ENOENT;
-       }
-       if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
-               NV_ERROR(dev, "DCB I2C entry invalid\n");
-               return -EINVAL;
-       }
-
-       if (dcb_i2c_ver >= 0x30) {
-               port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
-               /*
-                * Fixup for chips using same address offset for read and
-                * write.
-                */
-               if (port_type == 4)     /* seen on C51 */
-                       rdofs = wrofs = 1;
-               if (port_type >= 5)     /* G80+ */
-                       rdofs = wrofs = 0;
-       }
-
-       if (dcb_i2c_ver >= 0x40) {
-               if (port_type != 5 && port_type != 6)
-                       NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
-               i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
-       }
-
-       i2c->port_type = port_type;
-       i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
-       i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
-       return 0;
-}
-
 static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
        if (i2c_index == 0xff) {
+               struct drm_nouveau_private *dev_priv = dev->dev_private;
+               struct dcb_table *dcb = &dev_priv->vbios.dcb;
                /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
-               int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
-               int default_indices = dcb->i2c_default_indices;
+               int idx = dcb_entry_idx_from_crtchead(dev);
 
+               i2c_index = NV_I2C_DEFAULT(0);
                if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
-                       shift = 4;
-
-               i2c_index = (default_indices >> shift) & 0xf;
+                       i2c_index = NV_I2C_DEFAULT(1);
        }
-       if (i2c_index == 0x80)  /* g80+ */
-               i2c_index = dcb->i2c_default_indices & 0xf;
-       else
-       if (i2c_index == 0x81)
-               i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
-       if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
-               NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
-               return NULL;
-       }
-
-       /* Make sure i2c table entry has been parsed, it may not
-        * have been if this is a bus not referenced by a DCB encoder
-        */
-       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                          i2c_index, &dcb->i2c[i2c_index]);
 
        return nouveau_i2c_find(dev, i2c_index);
 }
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        switch (cond) {
        case 0:
-       {
-               struct dcb_connector_table_entry *ent =
-                       &bios->dcb.connector.entry[dcb->connector];
-
-               if (ent->type != DCB_CONNECTOR_eDP)
+               entry = dcb_conn(dev, dcb->connector);
+               if (!entry || entry[0] != DCB_CONNECTOR_eDP)
                        iexec->execute = false;
-       }
                break;
        case 1:
        case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
        return 1;
 }
 
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-       const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
-       u32 r, s, v;
-
-       /* Not a clue, needs de-magicing */
-       r = nv50_gpio_ctl[gpio->line >> 4];
-       s = (gpio->line & 0x0f);
-       v = bios_rd32(bios, r) & ~(0x00010001 << s);
-       switch ((gpio->entry & 0x06000000) >> 25) {
-       case 1:
-               v |= (0x00000001 << s);
-               break;
-       case 2:
-               v |= (0x00010000 << s);
-               break;
-       default:
-               break;
-       }
-
-       bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-       u32 v, i;
-
-       v  = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
-       v &= 0xffffff00;
-       v |= (gpio->entry & 0x00ff0000) >> 16;
-       bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
-       i = (gpio->entry & 0x1f000000) >> 24;
-       if (i) {
-               v  = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
-               v &= 0xffffff00;
-               v |= gpio->line;
-               bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
-       }
-}
-
 static int
 init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
         * each GPIO according to various values listed in each entry
         */
 
-       struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       int i;
-
-       if (dev_priv->card_type < NV_50) {
-               NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
-               return 1;
-       }
-
-       if (!iexec->execute)
-               return 1;
-
-       for (i = 0; i < bios->dcb.gpio.entries; i++) {
-               struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
-               BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
-               BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
-                       offset, gpio->tag, gpio->state_default);
-
-               if (!bios->execute)
-                       continue;
-
-               pgpio->set(bios->dev, gpio->tag, gpio->state_default);
-               if (dev_priv->card_type < NV_D0)
-                       init_gpio_unknv50(bios, gpio);
-               else
-                       init_gpio_unknvd0(bios, gpio);
-       }
+       if (iexec->execute && bios->execute)
+               nouveau_gpio_reset(bios->dev);
 
        return 1;
 }
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
                break;
        }
 
-       /* Dell Latitude D620 reports a too-high value for the dual-link
-        * transition freq, causing us to program the panel incorrectly.
-        *
-        * It doesn't appear the VBIOS actually uses its transition freq
-        * (90000kHz), instead it uses the "Number of LVDS channels" field
-        * out of the panel ID structure (http://www.spwg.org/).
-        *
-        * For the moment, a quirk will do :)
-        */
-       if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
-               bios->fp.duallink_transition_clk = 80000;
-
        /* set dual_link flag for EDID case */
        if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
                bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
        NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
                        dcbent->type, dcbent->location, dcbent->or);
        for (i = 0; i < table[3]; i++) {
-               otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+               otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
                if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
                        break;
        }
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
        { PLL_CORE  , 0x004028 },
        { PLL_SHADER, 0x004020 },
        { PLL_MEMORY, 0x004008 },
-       { PLL_UNK05 , 0x004030 },
+       { PLL_VDEC  , 0x004030 },
        { PLL_UNK41 , 0x00e818 },
        { PLL_VPLL0 , 0x614100 },
        { PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
        struct nvbios *bios = &dev_priv->vbios;
        u8 entries, *entry;
 
+       if (bios->type != NVBIOS_BIT)
+               return -ENODEV;
+
        entries = bios->data[bios->offset + 10];
        entry   = &bios->data[bios->offset + 12];
        while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
                        bit->version = entry[1];
                        bit->length = ROM16(entry[2]);
                        bit->offset = ROM16(entry[4]);
-                       bit->data = ROMPTR(bios, entry[4]);
+                       bit->data = ROMPTR(dev, entry[4]);
                        return 0;
                }
 
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
        uint16_t legacy_scripts_offset, legacy_i2c_offset;
 
        /* load needed defaults in case we can't parse this info */
-       bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
-       bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
-       bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
-       bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
        bios->digital_min_front_porch = 0x4b;
        bios->fmaxvco = 256000;
        bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
        bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
        bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
        bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
-       if (bios->data[legacy_i2c_offset + 4])
-               bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
-       if (bios->data[legacy_i2c_offset + 5])
-               bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
-       if (bios->data[legacy_i2c_offset + 6])
-               bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
-       if (bios->data[legacy_i2c_offset + 7])
-               bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
 
        if (bmplength > 74) {
                bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
        return 0;
 }
 
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
-       struct drm_device *dev = bios->dev;
-       struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
-       if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
-               NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
-               return NULL;
-       }
-
-       return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       int i;
-
-       for (i = 0; i < bios->dcb.gpio.entries; i++) {
-               if (bios->dcb.gpio.entry[i].tag != tag)
-                       continue;
+       u8 *dcb = NULL;
 
-               return &bios->dcb.gpio.entry[i];
+       if (dev_priv->card_type > NV_04)
+               dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+       if (!dcb) {
+               NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+               return NULL;
        }
 
-       return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
-       struct drm_device *dev = bios->dev;
-       struct dcb_gpio_entry *e;
-       u8 headerlen, entries, recordlen;
-       u8 *dcb, *gpio = NULL, *entry;
-       int i;
-
-       dcb = ROMPTR(bios, bios->data[0x36]);
+       if (dcb[0] >= 0x41) {
+               NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+               return NULL;
+       } else
        if (dcb[0] >= 0x30) {
-               gpio = ROMPTR(bios, dcb[10]);
-               if (!gpio)
-                       goto no_table;
-
-               headerlen = gpio[1];
-               entries   = gpio[2];
-               recordlen = gpio[3];
+               if (ROM32(dcb[6]) == 0x4edcbdcb)
+                       return dcb;
        } else
-       if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
-               gpio = ROMPTR(bios, dcb[-15]);
-               if (!gpio)
-                       goto no_table;
-
-               headerlen = 3;
-               entries   = gpio[2];
-               recordlen = gpio[1];
+       if (dcb[0] >= 0x20) {
+               if (ROM32(dcb[4]) == 0x4edcbdcb)
+                       return dcb;
        } else
-       if (dcb[0] >= 0x22) {
-               /* No GPIO table present, parse the TVDAC GPIO data. */
-               uint8_t *tvdac_gpio = &dcb[-5];
-
-               if (tvdac_gpio[0] & 1) {
-                       e = new_gpio_entry(bios);
-                       e->tag = DCB_GPIO_TVDAC0;
-                       e->line = tvdac_gpio[1] >> 4;
-                       e->invert = tvdac_gpio[0] & 2;
-               }
-
-               goto no_table;
+       if (dcb[0] >= 0x15) {
+               if (!memcmp(&dcb[-7], "DEV_REC", 7))
+                       return dcb;
        } else {
-               NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
-               goto no_table;
-       }
-
-       entry = gpio + headerlen;
-       for (i = 0; i < entries; i++, entry += recordlen) {
-               e = new_gpio_entry(bios);
-               if (!e)
-                       break;
-
-               if (gpio[0] < 0x40) {
-                       e->entry = ROM16(entry[0]);
-                       e->tag = (e->entry & 0x07e0) >> 5;
-                       if (e->tag == 0x3f) {
-                               bios->dcb.gpio.entries--;
-                               continue;
-                       }
-
-                       e->line = (e->entry & 0x001f);
-                       e->invert = ((e->entry & 0xf800) >> 11) != 4;
-               } else {
-                       e->entry = ROM32(entry[0]);
-                       e->tag = (e->entry & 0x0000ff00) >> 8;
-                       if (e->tag == 0xff) {
-                               bios->dcb.gpio.entries--;
-                               continue;
-                       }
-
-                       e->line = (e->entry & 0x0000001f) >> 0;
-                       if (gpio[0] == 0x40) {
-                               e->state_default = (e->entry & 0x01000000) >> 24;
-                               e->state[0] = (e->entry & 0x18000000) >> 27;
-                               e->state[1] = (e->entry & 0x60000000) >> 29;
-                       } else {
-                               e->state_default = (e->entry & 0x00000080) >> 7;
-                               e->state[0] = (entry[4] >> 4) & 3;
-                               e->state[1] = (entry[4] >> 6) & 3;
-                       }
-               }
-       }
-
-no_table:
-       /* Apple iMac G4 NV18 */
-       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-               e = new_gpio_entry(bios);
-               if (e) {
-                       e->tag = DCB_GPIO_TVDAC0;
-                       e->line = 4;
-               }
-       }
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       struct dcb_connector_table_entry *cte;
-
-       if (index >= bios->dcb.connector.entries)
-               return NULL;
-
-       cte = &bios->dcb.connector.entry[index];
-       if (cte->type == 0xff)
+               /*
+                * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+                * always has the same single (crt) entry, even when tv-out
+                * present, so the conclusion is this version cannot really
+                * be used.
+                *
+                * v1.2 tables (some NV6/10, and NV15+) normally have the
+                * same 5 entries, which are not specific to the card and so
+                * no use.
+                *
+                * v1.2 does have an I2C table that read_dcb_i2c_table can
+                * handle, but cards exist (nv11 in #14821) with a bad i2c
+                * table pointer, so use the indices parsed in
+                * parse_bmp_structure.
+                *
+                * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+                */
+               NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
                return NULL;
+       }
 
-       return cte;
+       NV_WARNONCE(dev, "DCB header validation failed\n");
+       return NULL;
 }
 
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
 {
-       struct dcb_table *dcb = &bios->dcb;
-       unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
-       int i;
-
-       for (i = 0; i < dcb->entries; i++) {
-               if (dcb->entry[i].connector == index)
-                       encoders |= (1 << dcb->entry[i].type);
-       }
-
-       if (encoders & (1 << OUTPUT_DP)) {
-               if (encoders & (1 << OUTPUT_TMDS))
-                       type = DCB_CONNECTOR_DP;
-               else
-                       type = DCB_CONNECTOR_eDP;
-       } else
-       if (encoders & (1 << OUTPUT_TMDS)) {
-               if (encoders & (1 << OUTPUT_ANALOG))
-                       type = DCB_CONNECTOR_DVI_I;
-               else
-                       type = DCB_CONNECTOR_DVI_D;
-       } else
-       if (encoders & (1 << OUTPUT_ANALOG)) {
-               type = DCB_CONNECTOR_VGA;
+       u8 *dcb = dcb_table(dev);
+       if (dcb && dcb[0] >= 0x30) {
+               if (idx < dcb[2])
+                       return dcb + dcb[1] + (idx * dcb[3]);
        } else
-       if (encoders & (1 << OUTPUT_LVDS)) {
-               type = DCB_CONNECTOR_LVDS;
+       if (dcb && dcb[0] >= 0x20) {
+               u8 *i2c = ROMPTR(dev, dcb[2]);
+               u8 *ent = dcb + 8 + (idx * 8);
+               if (i2c && ent < i2c)
+                       return ent;
        } else
-       if (encoders & (1 << OUTPUT_TV)) {
-               type = DCB_CONNECTOR_TV_0;
+       if (dcb && dcb[0] >= 0x15) {
+               u8 *i2c = ROMPTR(dev, dcb[2]);
+               u8 *ent = dcb + 4 + (idx * 10);
+               if (i2c && ent < i2c)
+                       return ent;
        }
 
-       return type;
+       return NULL;
 }
 
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
-       struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
-       struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+                int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+       int ret, idx = -1;
+       u8 *outp = NULL;
+       while ((outp = dcb_outp(dev, ++idx))) {
+               if (ROM32(outp[0]) == 0x00000000)
+                       break; /* seen on an NV11 with DCB v1.5 */
+               if (ROM32(outp[0]) == 0xffffffff)
+                       break; /* seen on an NV17 with DCB v2.0 */
+
+               if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+                       continue;
+               if ((outp[0] & 0x0f) == OUTPUT_EOL)
+                       break;
 
-       /* Gigabyte NX85T */
-       if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
-               if (cte->type == DCB_CONNECTOR_HDMI_1)
-                       cte->type = DCB_CONNECTOR_DVI_I;
+               ret = exec(dev, data, idx, outp);
+               if (ret)
+                       return ret;
        }
 
-       /* Gigabyte GV-NX86T512H */
-       if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
-               if (cte->type == DCB_CONNECTOR_HDMI_1)
-                       cte->type = DCB_CONNECTOR_DVI_I;
-       }
+       return 0;
 }
 
-static const u8 hpd_gpio[16] = {
-       0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
 {
-       struct drm_device *dev = bios->dev;
-       struct dcb_connector_table *ct = &bios->dcb.connector;
-       struct dcb_connector_table_entry *cte;
-       uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
-       uint8_t *entry;
-       int i;
-
-       if (!bios->dcb.connector_table_ptr) {
-               NV_DEBUG_KMS(dev, "No DCB connector table present\n");
-               return;
-       }
-
-       NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
-               conntab[0], conntab[1], conntab[2], conntab[3]);
-       if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
-           (conntab[3] != 2 && conntab[3] != 4)) {
-               NV_ERROR(dev, "  Unknown!  Please report.\n");
-               return;
+       u8 *dcb = dcb_table(dev);
+       if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+               u8 *conntab = ROMPTR(dev, dcb[0x14]);
+               if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+                       return conntab;
        }
+       return NULL;
+}
 
-       ct->entries = conntab[2];
-
-       entry = conntab + conntab[1];
-       cte = &ct->entry[0];
-       for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
-               cte->index = i;
-               if (conntab[3] == 2)
-                       cte->entry = ROM16(entry[0]);
-               else
-                       cte->entry = ROM32(entry[0]);
-
-               cte->type  = (cte->entry & 0x000000ff) >> 0;
-               cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
-               cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
-               cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
-               if (cte->type == 0xff)
-                       continue;
-
-               apply_dcb_connector_quirks(bios, i);
-
-               NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
-                       i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
-               /* check for known types, fallback to guessing the type
-                * from attached encoders if we hit an unknown.
-                */
-               switch (cte->type) {
-               case DCB_CONNECTOR_VGA:
-               case DCB_CONNECTOR_TV_0:
-               case DCB_CONNECTOR_TV_1:
-               case DCB_CONNECTOR_TV_3:
-               case DCB_CONNECTOR_DVI_I:
-               case DCB_CONNECTOR_DVI_D:
-               case DCB_CONNECTOR_LVDS:
-               case DCB_CONNECTOR_LVDS_SPWG:
-               case DCB_CONNECTOR_DP:
-               case DCB_CONNECTOR_eDP:
-               case DCB_CONNECTOR_HDMI_0:
-               case DCB_CONNECTOR_HDMI_1:
-                       break;
-               default:
-                       cte->type = divine_connector_type(bios, cte->index);
-                       NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
-                       break;
-               }
-
-               if (nouveau_override_conntype) {
-                       int type = divine_connector_type(bios, cte->index);
-                       if (type != cte->type)
-                               NV_WARN(dev, " -> type 0x%02x\n", cte->type);
-               }
-
-       }
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+       u8 *conntab = dcb_conntab(dev);
+       if (conntab && idx < conntab[2])
+               return conntab + conntab[1] + (idx * conntab[3]);
+       return NULL;
 }
 
 static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
        entry->type = conn & 0xf;
        entry->i2c_index = (conn >> 4) & 0xf;
        entry->heads = (conn >> 8) & 0xf;
-       if (dcb->version >= 0x40)
-               entry->connector = (conn >> 12) & 0xf;
+       entry->connector = (conn >> 12) & 0xf;
        entry->bus = (conn >> 16) & 0xf;
        entry->location = (conn >> 20) & 0x3;
        entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
        return true;
 }
 
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
-                           uint32_t conn, uint32_t conf)
-{
-       struct dcb_entry *entry = new_dcb_entry(dcb);
-       bool ret;
-
-       if (dcb->version >= 0x20)
-               ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
-       else
-               ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
-       if (!ret)
-               return ret;
-
-       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                          entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
-       return true;
-}
-
 static
 void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 {
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #endif
 
        /* Make up some sane defaults */
-       fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+       fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+                            bios->legacy.i2c_indices.crt, 1, 1);
 
        if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-               fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+               fabricate_dcb_output(dcb, OUTPUT_TV,
+                                    bios->legacy.i2c_indices.tv,
                                     all_heads, 0);
 
        else if (bios->tmds.output0_script_ptr ||
                 bios->tmds.output1_script_ptr)
-               fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+               fabricate_dcb_output(dcb, OUTPUT_TMDS,
+                                    bios->legacy.i2c_indices.panel,
                                     all_heads, 1);
 }
 
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &bios->dcb;
-       uint16_t dcbptr = 0, i2ctabptr = 0;
-       uint8_t *dcbtable;
-       uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
-       bool configblock = true;
-       int recordlength = 8, confofs = 4;
-       int i;
-
-       /* get the offset from 0x36 */
-       if (dev_priv->card_type > NV_04) {
-               dcbptr = ROM16(bios->data[0x36]);
-               if (dcbptr == 0x0000)
-                       NV_WARN(dev, "No output data (DCB) found in BIOS\n");
-       }
-
-       /* this situation likely means a really old card, pre DCB */
-       if (dcbptr == 0x0) {
-               fabricate_dcb_encoder_table(dev, bios);
-               return 0;
-       }
-
-       dcbtable = &bios->data[dcbptr];
-
-       /* get DCB version */
-       dcb->version = dcbtable[0];
-       NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
-                dcb->version >> 4, dcb->version & 0xf);
-
-       if (dcb->version >= 0x20) { /* NV17+ */
-               uint32_t sig;
+       struct dcb_table *dcb = &dev_priv->vbios.dcb;
+       u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+       u32 conn = ROM32(outp[0]);
+       bool ret;
 
-               if (dcb->version >= 0x30) { /* NV40+ */
-                       headerlen = dcbtable[1];
-                       entries = dcbtable[2];
-                       recordlength = dcbtable[3];
-                       i2ctabptr = ROM16(dcbtable[4]);
-                       sig = ROM32(dcbtable[6]);
-                       dcb->gpio_table_ptr = ROM16(dcbtable[10]);
-                       dcb->connector_table_ptr = ROM16(dcbtable[20]);
-               } else {
-                       i2ctabptr = ROM16(dcbtable[2]);
-                       sig = ROM32(dcbtable[4]);
-                       headerlen = 8;
-               }
+       if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+               struct dcb_entry *entry = new_dcb_entry(dcb);
 
-               if (sig != 0x4edcbdcb) {
-                       NV_ERROR(dev, "Bad Display Configuration Block "
-                                       "signature (%08X)\n", sig);
-                       return -EINVAL;
-               }
-       } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
-               char sig[8] = { 0 };
+               NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
 
-               strncpy(sig, (char *)&dcbtable[-7], 7);
-               i2ctabptr = ROM16(dcbtable[2]);
-               recordlength = 10;
-               confofs = 6;
+               if (dcb->version >= 0x20)
+                       ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+               else
+                       ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+               if (!ret)
+                       return 1; /* stop parsing */
 
-               if (strcmp(sig, "DEV_REC")) {
-                       NV_ERROR(dev, "Bad Display Configuration Block "
-                                       "signature (%s)\n", sig);
-                       return -EINVAL;
-               }
-       } else {
-               /*
-                * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
-                * has the same single (crt) entry, even when tv-out present, so
-                * the conclusion is this version cannot really be used.
-                * v1.2 tables (some NV6/10, and NV15+) normally have the same
-                * 5 entries, which are not specific to the card and so no use.
-                * v1.2 does have an I2C table that read_dcb_i2c_table can
-                * handle, but cards exist (nv11 in #14821) with a bad i2c table
-                * pointer, so use the indices parsed in parse_bmp_structure.
-                * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+               /* Ignore the I2C index for on-chip TV-out, as there
+                * are cards with bogus values (nv31m in bug 23212),
+                * and it's otherwise useless.
                 */
-               NV_TRACEWARN(dev, "No useful information in BIOS output table; "
-                                 "adding all possible outputs\n");
-               fabricate_dcb_encoder_table(dev, bios);
-               return 0;
+               if (entry->type == OUTPUT_TV &&
+                   entry->location == DCB_LOC_ON_CHIP)
+                       entry->i2c_index = 0x0f;
        }
 
-       if (!i2ctabptr)
-               NV_WARN(dev, "No pointer to DCB I2C port table\n");
-       else {
-               dcb->i2c_table = &bios->data[i2ctabptr];
-               if (dcb->version >= 0x30)
-                       dcb->i2c_default_indices = dcb->i2c_table[4];
+       return 0;
+}
 
-               /*
-                * Parse the "management" I2C bus, used for hardware
-                * monitoring and some external TMDS transmitters.
-                */
-               if (dcb->version >= 0x22) {
-                       int idx = (dcb->version >= 0x40 ?
-                                  dcb->i2c_default_indices & 0xf :
-                                  2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+       struct dcb_table *dcbt = &bios->dcb;
+       u8 map[16] = { };
+       int i, idx = 0;
 
-                       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                                          idx, &dcb->i2c[idx]);
-               }
+       /* heuristic: if we ever get a non-zero connector field, assume
+        * that all the indices are valid and we don't need fake them.
+        */
+       for (i = 0; i < dcbt->entries; i++) {
+               if (dcbt->entry[i].connector)
+                       return;
        }
 
-       if (entries > DCB_MAX_NUM_ENTRIES)
-               entries = DCB_MAX_NUM_ENTRIES;
-
-       for (i = 0; i < entries; i++) {
-               uint32_t connection, config = 0;
-
-               connection = ROM32(dcbtable[headerlen + recordlength * i]);
-               if (configblock)
-                       config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
-               /* seen on an NV11 with DCB v1.5 */
-               if (connection == 0x00000000)
-                       break;
+       /* no useful connector info available, we need to make it up
+        * ourselves.  the rule here is: anything on the same i2c bus
+        * is considered to be on the same connector.  any output
+        * without an associated i2c bus is assigned its own unique
+        * connector index.
+        */
+       for (i = 0; i < dcbt->entries; i++) {
+               u8 i2c = dcbt->entry[i].i2c_index;
+               if (i2c == 0x0f) {
+                       dcbt->entry[i].connector = idx++;
+               } else {
+                       if (!map[i2c])
+                               map[i2c] = ++idx;
+                       dcbt->entry[i].connector = map[i2c] - 1;
+               }
+       }
 
-               /* seen on an NV17 with DCB v2.0 */
-               if (connection == 0xffffffff)
-                       break;
+       /* if we created more than one connector, destroy the connector
+        * table - just in case it has random, rather than stub, entries.
+        */
+       if (i > 1) {
+               u8 *conntab = dcb_conntab(bios->dev);
+               if (conntab)
+                       conntab[0] = 0x00;
+       }
+}
 
-               if ((connection & 0x0000000f) == 0x0000000f)
-                       continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+       struct dcb_table *dcb = &bios->dcb;
+       u8 *dcbt, *conn;
+       int idx;
+
+       dcbt = dcb_table(dev);
+       if (!dcbt) {
+               /* handle pre-DCB boards */
+               if (bios->type == NVBIOS_BMP) {
+                       fabricate_dcb_encoder_table(dev, bios);
+                       return 0;
+               }
 
-               if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
-                       continue;
+               return -EINVAL;
+       }
 
-               NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
-                            dcb->entries, connection, config);
+       NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
 
-               if (!parse_dcb_entry(dev, dcb, connection, config))
-                       break;
-       }
+       dcb->version = dcbt[0];
+       dcb_outp_foreach(dev, NULL, parse_dcb_entry);
 
        /*
         * apart for v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
        if (!dcb->entries)
                return -ENXIO;
 
-       parse_dcb_gpio_table(bios);
-       parse_dcb_connector_table(bios);
-       return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
-       struct dcb_table *dcb = &bios->dcb;
-       int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
-       /*
-        * DCB 3.0 also has the table in most cases, but there are some cards
-        * where the table is filled with stub entries, and the DCB entriy
-        * indices are all 0.  We don't need the connector indices on pre-G80
-        * chips (yet?) so limit the use to DCB 4.0 and above.
-        */
-       if (dcb->version >= 0x40)
-               return;
-
-       dcb->connector.entries = 0;
-
-       /*
-        * No known connector info before v3.0, so make it up.  the rule here
-        * is: anything on the same i2c bus is considered to be on the same
-        * connector.  any output without an associated i2c bus is assigned
-        * its own unique connector index.
-        */
-       for (i = 0; i < dcb->entries; i++) {
-               /*
-                * Ignore the I2C index for on-chip TV-out, as there
-                * are cards with bogus values (nv31m in bug 23212),
-                * and it's otherwise useless.
-                */
-               if (dcb->entry[i].type == OUTPUT_TV &&
-                   dcb->entry[i].location == DCB_LOC_ON_CHIP)
-                       dcb->entry[i].i2c_index = 0xf;
-               i2c = dcb->entry[i].i2c_index;
-
-               if (i2c_conn[i2c]) {
-                       dcb->entry[i].connector = i2c_conn[i2c] - 1;
-                       continue;
+       /* dump connector table entries to log, if any exist */
+       idx = -1;
+       while ((conn = dcb_conn(dev, ++idx))) {
+               if (conn[0] != 0xff) {
+                       NV_TRACE(dev, "DCB conn %02d: ", idx);
+                       if (dcb_conntab(dev)[3] < 4)
+                               printk("%04x\n", ROM16(conn[0]));
+                       else
+                               printk("%08x\n", ROM32(conn[0]));
                }
-
-               dcb->entry[i].connector = dcb->connector.entries++;
-               if (i2c != 0xf)
-                       i2c_conn[i2c] = dcb->connector.entries;
-       }
-
-       /* Fake the connector table as well as just connector indices */
-       for (i = 0; i < dcb->connector.entries; i++) {
-               dcb->connector.entry[i].index = i;
-               dcb->connector.entry[i].type = divine_connector_type(bios, i);
-               dcb->connector.entry[i].gpio_tag = 0xff;
-       }
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
-       struct dcb_table *dcb = &bios->dcb;
-       int i;
-
-       for (i = 0; i < dcb->entries; i++) {
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
        }
+       dcb_fake_connectors(bios);
+       return 0;
 }
 
 static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
        return ret;
 }
 
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       struct dcb_i2c_entry *entry;
-       int i;
-
-       entry = &bios->dcb.i2c[0];
-       for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
-               nouveau_i2c_fini(dev, entry);
-}
-
 static bool
 nouveau_bios_posted(struct drm_device *dev)
 {
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
        if (ret)
                return ret;
 
-       ret = parse_dcb_table(dev, bios);
+       ret = nouveau_i2c_init(dev);
        if (ret)
                return ret;
 
-       fixup_legacy_i2c(bios);
-       fixup_legacy_connector(bios);
+       ret = nouveau_mxm_init(dev);
+       if (ret)
+               return ret;
+
+       ret = parse_dcb_table(dev, bios);
+       if (ret)
+               return ret;
 
        if (!bios->major_version)       /* we don't run version 0 bios */
                return 0;
@@ -6971,5 +6498,6 @@ nouveau_bios_init(struct drm_device *dev)
 void
 nouveau_bios_takedown(struct drm_device *dev)
 {
-       nouveau_bios_i2c_devices_takedown(dev);
+       nouveau_mxm_fini(dev);
+       nouveau_i2c_fini(dev);
 }
index 8adb69e4a6b124f2034e0e80ed0ac97ca13a1b9f..1e382ad5a2b85b04fb6f8b0953fe793eee44f615 100644 (file)
 
 #define DCB_LOC_ON_CHIP 0
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({            \
+       struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+       ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+})
 
 struct bit_entry {
        uint8_t  id;
@@ -48,30 +53,12 @@ struct bit_entry {
 
 int bit_table(struct drm_device *, u8 id, struct bit_entry *);
 
-struct dcb_i2c_entry {
-       uint32_t entry;
-       uint8_t port_type;
-       uint8_t read, write;
-       struct nouveau_i2c_chan *chan;
-};
-
 enum dcb_gpio_tag {
        DCB_GPIO_TVDAC0 = 0xc,
        DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
-       enum dcb_gpio_tag tag;
-       int line;
-       bool invert;
-       uint32_t entry;
-       uint8_t state_default;
-       uint8_t state[2];
-};
-
-struct dcb_gpio_table {
-       int entries;
-       struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+       DCB_GPIO_PWM_FAN = 0x9,
+       DCB_GPIO_FAN_SENSE = 0x3d,
+       DCB_GPIO_UNUSED = 0xff
 };
 
 enum dcb_connector_type {
@@ -90,20 +77,6 @@ enum dcb_connector_type {
        DCB_CONNECTOR_NONE = 0xff
 };
 
-struct dcb_connector_table_entry {
-       uint8_t index;
-       uint32_t entry;
-       enum dcb_connector_type type;
-       uint8_t index2;
-       uint8_t gpio_tag;
-       void *drm;
-};
-
-struct dcb_connector_table {
-       int entries;
-       struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
 enum dcb_type {
        OUTPUT_ANALOG = 0,
        OUTPUT_TV = 1,
@@ -111,6 +84,7 @@ enum dcb_type {
        OUTPUT_LVDS = 3,
        OUTPUT_DP = 6,
        OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+       OUTPUT_UNUSED = 15,
        OUTPUT_ANY = -1
 };
 
@@ -155,18 +129,8 @@ struct dcb_entry {
 
 struct dcb_table {
        uint8_t version;
-
        int entries;
        struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
-       uint8_t *i2c_table;
-       uint8_t i2c_default_indices;
-       struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
-       uint16_t gpio_table_ptr;
-       struct dcb_gpio_table gpio;
-       uint16_t connector_table_ptr;
-       struct dcb_connector_table connector;
 };
 
 enum nouveau_or {
@@ -195,7 +159,7 @@ enum pll_types {
        PLL_SHADER = 0x02,
        PLL_UNK03  = 0x03,
        PLL_MEMORY = 0x04,
-       PLL_UNK05  = 0x05,
+       PLL_VDEC   = 0x05,
        PLL_UNK40  = 0x40,
        PLL_UNK41  = 0x41,
        PLL_UNK42  = 0x42,
@@ -333,4 +297,11 @@ struct nvbios {
        } legacy;
 };
 
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+                    int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
 #endif
index 7cc37e69086012594a1dff3a2e255f66e396e656..724b41a2b9e9414b27ef6c4ea347a116657c9026 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
+       size_t acc_size;
        int ret;
 
        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
 
+       acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+                                      sizeof(struct nouveau_bo));
+
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, size,
+                         align >> PAGE_SHIFT, 0, false, NULL, acc_size,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
                *mem = val;
 }
 
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+                     unsigned long size, uint32_t page_flags,
+                     struct page *dummy_read_page)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
        switch (dev_priv->gart_info.type) {
 #if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
-               return ttm_agp_backend_init(bdev, dev->agp->bridge);
+               return ttm_agp_tt_create(bdev, dev->agp->bridge,
+                                        size, page_flags, dummy_read_page);
 #endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
-               return nouveau_sgdma_init_ttm(dev);
+               return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+                                               dummy_read_page);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
        if (mem->mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, node);
        else
-               nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
-                                 node, node->pages);
+               nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
 
        return 0;
 }
@@ -801,19 +809,18 @@ out:
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
-       struct nouveau_mem *node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;
 
        list_for_each_entry(vma, &nvbo->vma_list, head) {
-               if (new_mem->mem_type == TTM_PL_VRAM) {
+               if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
                } else
-               if (new_mem->mem_type == TTM_PL_TT &&
+               if (new_mem && new_mem->mem_type == TTM_PL_TT &&
                    nvbo->page_shift == vma->vm->spg_shift) {
                        nouveau_vm_map_sg(vma, 0, new_mem->
                                          num_pages << PAGE_SHIFT,
-                                         node, node->pages);
+                                         new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
@@ -1044,8 +1051,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
        nouveau_fence_unref(&old_fence);
 }
 
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct drm_nouveau_private *dev_priv;
+       struct drm_device *dev;
+       unsigned i;
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       dev_priv = nouveau_bdev(ttm->bdev);
+       dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+       if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+               return ttm_agp_tt_populate(ttm);
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               return ttm_dma_populate((void *)ttm, dev->dev);
+       }
+#endif
+
+       r = ttm_pool_populate(ttm);
+       if (r) {
+               return r;
+       }
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+                                                  0, PAGE_SIZE,
+                                                  PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+                       while (--i) {
+                               pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                               ttm_dma->dma_address[i] = 0;
+                       }
+                       ttm_pool_unpopulate(ttm);
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{ /* inverse of nouveau_ttm_tt_populate: release DMA mappings and pool pages */
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct drm_nouveau_private *dev_priv;
+       struct drm_device *dev;
+       unsigned i;
+
+       dev_priv = nouveau_bdev(ttm->bdev);
+       dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+       if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+               ttm_agp_tt_unpopulate(ttm); /* AGP path: delegate entirely to TTM's AGP backend */
+               return;
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               ttm_dma_unpopulate((void *)ttm, dev->dev); /* swiotlb path: coherent DMA pool owns the pages */
+               return;
+       }
+#endif
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               if (ttm_dma->dma_address[i]) { /* 0 == never mapped (cleared on populate's error unwind) */
+                       pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+
+       ttm_pool_unpopulate(ttm);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
-       .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+       .ttm_tt_create = &nouveau_ttm_tt_create,
+       .ttm_tt_populate = &nouveau_ttm_tt_populate,
+       .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1184,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
        else
        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-               nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+               nouveau_vm_map_sg(vma, 0, size, node);
 
        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
index bb6ec9ef8676a6300f6de6fc221cc5b5c7c7199e..a018defb7621fd2e1c91e45f271485bebd9b17c5 100644 (file)
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        nouveau_dma_pre_init(chan);
        chan->user_put = 0x40;
        chan->user_get = 0x44;
+       if (dev_priv->card_type >= NV_50)
+                chan->user_get_hi = 0x60;
 
        /* disable the fifo caches */
        pfifo->reassign(dev, false);
index cea6696b19064e48a7c4f1e94bef793377962db8..f3ce34be082a7d5ad82b651de6978e2f63111ce2 100644 (file)
@@ -35,6 +35,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 #include "nouveau_connector.h"
+#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 
 static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
        return NULL;
 }
 
-/*TODO: This could use improvement, and learn to handle the fixed
- *      BIOS tables etc.  It's fine currently, for its only user.
- */
-int
-nouveau_connector_bpp(struct drm_connector *connector)
-{
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-
-       if (nv_connector->edid && nv_connector->edid->revision >= 4) {
-               u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
-               if (bpc > 4)
-                       return bpc;
-       }
-
-       return 18;
-}
-
 static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct drm_nouveau_private *dev_priv;
-       struct nouveau_gpio_engine *pgpio;
        struct drm_device *dev;
 
        if (!nv_connector)
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
        dev_priv = dev->dev_private;
        NV_DEBUG_KMS(dev, "\n");
 
-       pgpio = &dev_priv->engine.gpio;
-       if (pgpio->irq_unregister) {
-               pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
-                                     nouveau_connector_hotplug, connector);
+       if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+               nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
+                                    nouveau_connector_hotplug, connector);
        }
 
        kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
                return;
        nv_connector->detected_encoder = nv_encoder;
 
+       if (dev_priv->card_type >= NV_50) {
+               connector->interlace_allowed = true;
+               connector->doublescan_allowed = true;
+       } else
        if (nv_encoder->dcb->type == OUTPUT_LVDS ||
            nv_encoder->dcb->type == OUTPUT_TMDS) {
                connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
                        connector->interlace_allowed = true;
        }
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
                drm_connector_property_set_value(connector,
                        dev->mode_config.dvi_i_subconnector_property,
                        nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
        struct nouveau_encoder *nv_encoder;
        int type;
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
                if (connector->force == DRM_FORCE_ON_DIGITAL)
                        type = OUTPUT_TMDS;
                else
@@ -420,15 +406,21 @@ static int
 nouveau_connector_set_property(struct drm_connector *connector,
                               struct drm_property *property, uint64_t value)
 {
+       struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
        struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
        struct drm_device *dev = connector->dev;
+       struct nouveau_crtc *nv_crtc;
        int ret;
 
+       nv_crtc = NULL;
+       if (connector->encoder && connector->encoder->crtc)
+               nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
        /* Scaling mode */
        if (property == dev->mode_config.scaling_mode_property) {
-               struct nouveau_crtc *nv_crtc = NULL;
                bool modeset = false;
 
                switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
                        modeset = true;
                nv_connector->scaling_mode = value;
 
-               if (connector->encoder && connector->encoder->crtc)
-                       nv_crtc = nouveau_crtc(connector->encoder->crtc);
                if (!nv_crtc)
                        return 0;
 
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
                        if (!ret)
                                return -EINVAL;
                } else {
-                       ret = nv_crtc->set_scale(nv_crtc, value, true);
+                       ret = nv_crtc->set_scale(nv_crtc, true);
                        if (ret)
                                return ret;
                }
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
                return 0;
        }
 
-       /* Dithering */
-       if (property == dev->mode_config.dithering_mode_property) {
-               struct nouveau_crtc *nv_crtc = NULL;
+       /* Underscan */
+       if (property == disp->underscan_property) {
+               if (nv_connector->underscan != value) {
+                       nv_connector->underscan = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
 
-               if (value == DRM_MODE_DITHERING_ON)
-                       nv_connector->use_dithering = true;
-               else
-                       nv_connector->use_dithering = false;
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       if (property == disp->underscan_hborder_property) {
+               if (nv_connector->underscan_hborder != value) {
+                       nv_connector->underscan_hborder = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
+
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       if (property == disp->underscan_vborder_property) {
+               if (nv_connector->underscan_vborder != value) {
+                       nv_connector->underscan_vborder = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
+
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       /* Dithering */
+       if (property == disp->dithering_mode) {
+               nv_connector->dithering_mode = value;
+               if (!nv_crtc || !nv_crtc->set_dither)
+                       return 0;
 
-               if (connector->encoder && connector->encoder->crtc)
-                       nv_crtc = nouveau_crtc(connector->encoder->crtc);
+               return nv_crtc->set_dither(nv_crtc, true);
+       }
 
+       if (property == disp->dithering_depth) {
+               nv_connector->dithering_depth = value;
                if (!nv_crtc || !nv_crtc->set_dither)
                        return 0;
 
-               return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
-                                          true);
+               return nv_crtc->set_dither(nv_crtc, true);
        }
 
        if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
        return modes;
 }
 
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{ /* fill connector->display_info.bpc when the EDID did not provide it */
+       struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+       struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct drm_display_mode *mode = nv_connector->native_mode;
+       bool duallink;
+
+       /* if the edid is feeling nice enough to provide this info, use it */
+       if (nv_connector->edid && connector->display_info.bpc)
+               return;
+
+       /* if not, we're out of options unless we're LVDS, default to 6bpc */
+       connector->display_info.bpc = 6;
+       if (nv_encoder->dcb->type != OUTPUT_LVDS)
+               return;
+
+       /* LVDS: panel straps */
+       if (bios->fp_no_ddc) {
+               if (bios->fp.if_is_24bit)
+                       connector->display_info.bpc = 8;
+               return;
+       }
+
+       /* LVDS: DDC panel, need to first determine the number of links to
+        * know which if_is_24bit flag to check...
+        */
+       if (nv_connector->edid &&
+           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+               duallink = ((u8 *)nv_connector->edid)[121] == 2; /* NOTE(review): assumes SPWG EDID byte 121 encodes link count — confirm */
+       else
+               duallink = mode->clock >= bios->fp.duallink_transition_clk; /* NOTE(review): assumes native_mode is set for DDC LVDS panels here — confirm */
+
+       if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+           ( duallink && (bios->fp.strapless_is_24bit & 2)))
+               connector->display_info.bpc = 8;
+}
+
 static int
 nouveau_connector_get_modes(struct drm_connector *connector)
 {
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
                nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
        }
 
+       /* Determine display colour depth for everything except LVDS now,
+        * DP requires this before mode_valid() is called.
+        */
+       if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               nouveau_connector_detect_depth(connector);
+
        /* Find the native mode if this is a digital panel, if we didn't
         * find any modes through DDC previously add the native mode to
         * the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
                ret = 1;
        }
 
+       /* Determine LVDS colour depth, must happen after determining
+        * "native" mode as some VBIOS tables require us to use the
+        * pixel clock as part of the lookup...
+        */
+       if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+               nouveau_connector_detect_depth(connector);
+
        if (nv_encoder->dcb->type == OUTPUT_TV)
                ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
-           nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
-           nv_connector->dcb->type == DCB_CONNECTOR_eDP)
+       if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+           nv_connector->type == DCB_CONNECTOR_eDP)
                ret += nouveau_connector_scaler_modes_add(connector);
 
        return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
        case OUTPUT_DP:
                max_clock  = nv_encoder->dp.link_nr;
                max_clock *= nv_encoder->dp.link_bw;
-               clock = clock * nouveau_connector_bpp(connector) / 10;
+               clock = clock * (connector->display_info.bpc * 3) / 10;
                break;
        default:
                BUG_ON(1);
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
        .force = nouveau_connector_force
 };
 
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+       switch (dcb) {
+       case DCB_CONNECTOR_VGA      : return DRM_MODE_CONNECTOR_VGA;
+       case DCB_CONNECTOR_TV_0     :
+       case DCB_CONNECTOR_TV_1     :
+       case DCB_CONNECTOR_TV_3     : return DRM_MODE_CONNECTOR_TV;
+       case DCB_CONNECTOR_DVI_I    : return DRM_MODE_CONNECTOR_DVII;
+       case DCB_CONNECTOR_DVI_D    : return DRM_MODE_CONNECTOR_DVID;
+       case DCB_CONNECTOR_LVDS     :
+       case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+       case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
+       case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
+       case DCB_CONNECTOR_HDMI_0   :
+       case DCB_CONNECTOR_HDMI_1   : return DRM_MODE_CONNECTOR_HDMIA;
+       default:
+               break;
+       }
+
+       return DRM_MODE_CONNECTOR_Unknown;
+}
+
 struct drm_connector *
 nouveau_connector_create(struct drm_device *dev, int index)
 {
        const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
        struct nouveau_connector *nv_connector = NULL;
-       struct dcb_connector_table_entry *dcb = NULL;
        struct drm_connector *connector;
        int type, ret = 0;
+       bool dummy;
 
        NV_DEBUG_KMS(dev, "\n");
 
-       if (index >= dev_priv->vbios.dcb.connector.entries)
-               return ERR_PTR(-EINVAL);
-
-       dcb = &dev_priv->vbios.dcb.connector.entry[index];
-       if (dcb->drm)
-               return dcb->drm;
-
-       switch (dcb->type) {
-       case DCB_CONNECTOR_VGA:
-               type = DRM_MODE_CONNECTOR_VGA;
-               break;
-       case DCB_CONNECTOR_TV_0:
-       case DCB_CONNECTOR_TV_1:
-       case DCB_CONNECTOR_TV_3:
-               type = DRM_MODE_CONNECTOR_TV;
-               break;
-       case DCB_CONNECTOR_DVI_I:
-               type = DRM_MODE_CONNECTOR_DVII;
-               break;
-       case DCB_CONNECTOR_DVI_D:
-               type = DRM_MODE_CONNECTOR_DVID;
-               break;
-       case DCB_CONNECTOR_HDMI_0:
-       case DCB_CONNECTOR_HDMI_1:
-               type = DRM_MODE_CONNECTOR_HDMIA;
-               break;
-       case DCB_CONNECTOR_LVDS:
-       case DCB_CONNECTOR_LVDS_SPWG:
-               type = DRM_MODE_CONNECTOR_LVDS;
-               funcs = &nouveau_connector_funcs_lvds;
-               break;
-       case DCB_CONNECTOR_DP:
-               type = DRM_MODE_CONNECTOR_DisplayPort;
-               break;
-       case DCB_CONNECTOR_eDP:
-               type = DRM_MODE_CONNECTOR_eDP;
-               break;
-       default:
-               NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
-               return ERR_PTR(-EINVAL);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               nv_connector = nouveau_connector(connector);
+               if (nv_connector->index == index)
+                       return connector;
        }
 
        nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
        if (!nv_connector)
                return ERR_PTR(-ENOMEM);
-       nv_connector->dcb = dcb;
+
        connector = &nv_connector->base;
+       nv_connector->index = index;
+
+       /* attempt to parse vbios connector type and hotplug gpio */
+       nv_connector->dcb = dcb_conn(dev, index);
+       if (nv_connector->dcb) {
+               static const u8 hpd[16] = {
+                       0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+               };
+
+               u32 entry = ROM16(nv_connector->dcb[0]);
+               if (dcb_conntab(dev)[3] >= 4)
+                       entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+               nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
+               nv_connector->hpd = hpd[nv_connector->hpd];
+
+               nv_connector->type = nv_connector->dcb[0];
+               if (drm_conntype_from_dcb(nv_connector->type) ==
+                                         DRM_MODE_CONNECTOR_Unknown) {
+                       NV_WARN(dev, "unknown connector type %02x\n",
+                               nv_connector->type);
+                       nv_connector->type = DCB_CONNECTOR_NONE;
+               }
 
-       /* defaults, will get overridden in detect() */
-       connector->interlace_allowed = false;
-       connector->doublescan_allowed = false;
+               /* Gigabyte NX85T */
+               if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+                       if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+               }
 
-       drm_connector_init(dev, connector, funcs, type);
-       drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+               /* Gigabyte GV-NX86T512H */
+               if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+                       if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+               }
+       } else {
+               nv_connector->type = DCB_CONNECTOR_NONE;
+               nv_connector->hpd = DCB_GPIO_UNUSED;
+       }
+
+       /* no vbios data, or an unknown dcb connector type - attempt to
+        * figure out something suitable ourselves
+        */
+       if (nv_connector->type == DCB_CONNECTOR_NONE) {
+               struct drm_nouveau_private *dev_priv = dev->dev_private;
+               struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+               u32 encoders = 0;
+               int i;
+
+               for (i = 0; i < dcbt->entries; i++) {
+                       if (dcbt->entry[i].connector == nv_connector->index)
+                               encoders |= (1 << dcbt->entry[i].type);
+               }
 
-       /* Check if we need dithering enabled */
-       if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-               bool dummy, is_24bit = false;
+               if (encoders & (1 << OUTPUT_DP)) {
+                       if (encoders & (1 << OUTPUT_TMDS))
+                               nv_connector->type = DCB_CONNECTOR_DP;
+                       else
+                               nv_connector->type = DCB_CONNECTOR_eDP;
+               } else
+               if (encoders & (1 << OUTPUT_TMDS)) {
+                       if (encoders & (1 << OUTPUT_ANALOG))
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+                       else
+                               nv_connector->type = DCB_CONNECTOR_DVI_D;
+               } else
+               if (encoders & (1 << OUTPUT_ANALOG)) {
+                       nv_connector->type = DCB_CONNECTOR_VGA;
+               } else
+               if (encoders & (1 << OUTPUT_LVDS)) {
+                       nv_connector->type = DCB_CONNECTOR_LVDS;
+               } else
+               if (encoders & (1 << OUTPUT_TV)) {
+                       nv_connector->type = DCB_CONNECTOR_TV_0;
+               }
+       }
 
-               ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+       type = drm_conntype_from_dcb(nv_connector->type);
+       if (type == DRM_MODE_CONNECTOR_LVDS) {
+               ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
                if (ret) {
-                       NV_ERROR(dev, "Error parsing LVDS table, disabling "
-                                "LVDS\n");
-                       goto fail;
+                       NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+                       kfree(nv_connector);
+                       return ERR_PTR(ret);
                }
 
-               nv_connector->use_dithering = !is_24bit;
+               funcs = &nouveau_connector_funcs_lvds;
+       } else {
+               funcs = &nouveau_connector_funcs;
        }
 
+       /* defaults, will get overridden in detect() */
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_init(dev, connector, funcs, type);
+       drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
        /* Init DVI-I specific properties */
-       if (dcb->type == DCB_CONNECTOR_DVI_I) {
-               drm_mode_create_dvi_i_properties(dev);
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I)
                drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
-               drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+
+       /* Add overscan compensation options to digital outputs */
+       if (disp->underscan_property &&
+           (nv_connector->type == DCB_CONNECTOR_DVI_D ||
+            nv_connector->type == DCB_CONNECTOR_DVI_I ||
+            nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
+            nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
+            nv_connector->type == DCB_CONNECTOR_DP)) {
+               drm_connector_attach_property(connector,
+                                             disp->underscan_property,
+                                             UNDERSCAN_OFF);
+               drm_connector_attach_property(connector,
+                                             disp->underscan_hborder_property,
+                                             0);
+               drm_connector_attach_property(connector,
+                                             disp->underscan_vborder_property,
+                                             0);
        }
 
-       switch (dcb->type) {
+       switch (nv_connector->type) {
        case DCB_CONNECTOR_VGA:
                if (dev_priv->card_type >= NV_50) {
                        drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
                drm_connector_attach_property(connector,
                                dev->mode_config.scaling_mode_property,
                                nv_connector->scaling_mode);
-               drm_connector_attach_property(connector,
-                               dev->mode_config.dithering_mode_property,
-                               nv_connector->use_dithering ?
-                               DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+               if (disp->dithering_mode) {
+                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+                       drm_connector_attach_property(connector,
+                                               disp->dithering_mode,
+                                               nv_connector->dithering_mode);
+               }
+               if (disp->dithering_depth) {
+                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+                       drm_connector_attach_property(connector,
+                                               disp->dithering_depth,
+                                               nv_connector->dithering_depth);
+               }
                break;
        }
 
-       if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
-               pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
-                                   nouveau_connector_hotplug, connector);
-
-               connector->polled = DRM_CONNECTOR_POLL_HPD;
-       } else {
-               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+               ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
+                                          nouveau_connector_hotplug,
+                                          connector);
+               if (ret == 0)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
 
        drm_sysfs_connector_add(connector);
-
-       dcb->drm = connector;
-       return dcb->drm;
-
-fail:
-       drm_connector_cleanup(connector);
-       kfree(connector);
-       return ERR_PTR(ret);
-
+       return connector;
 }
 
 static void
index 711b1e9203aff075c16757fd41394a5ea06d33e7..e4857021304c46f5ff4bf3d6e2fa9b6d8f3baf21 100644 (file)
 #include "drm_edid.h"
 #include "nouveau_i2c.h"
 
+enum nouveau_underscan_type {
+       UNDERSCAN_OFF,
+       UNDERSCAN_ON,
+       UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+       DITHERING_MODE_OFF = 0x00,
+       DITHERING_MODE_ON = 0x01,
+       DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+       DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+       DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+       DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+       DITHERING_DEPTH_6BPC = 0x00,
+       DITHERING_DEPTH_8BPC = 0x02,
+       DITHERING_DEPTH_AUTO
+};
+
 struct nouveau_connector {
        struct drm_connector base;
+       enum dcb_connector_type type;
+       u8 index;
+       u8 *dcb;
+       u8 hpd;
 
-       struct dcb_connector_table_entry *dcb;
-
+       int dithering_mode;
+       int dithering_depth;
        int scaling_mode;
-       bool use_dithering;
+       enum nouveau_underscan_type underscan;
+       u32 underscan_hborder;
+       u32 underscan_vborder;
 
        struct nouveau_encoder *detected_encoder;
        struct edid *edid;
index bf8e1289953de6550293252087d7de52018b6a10..686f6b4a1da344a01f42181c5e49413c6ebb1a7a 100644 (file)
@@ -32,8 +32,6 @@ struct nouveau_crtc {
 
        int index;
 
-       struct drm_display_mode *mode;
-
        uint32_t dpms_saved_fp_control;
        uint32_t fp_users;
        int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
                int depth;
        } lut;
 
-       int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
-       int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+       int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+       int (*set_scale)(struct nouveau_crtc *crtc, bool update);
 };
 
 static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
index 8e1592368cce1985d3270f934473a58fb73f6a33..fa2ec491f6a7e56b40708a7a92eb7b9292666a35 100644 (file)
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
        seq_printf(m, "channel id    : %d\n", chan->id);
 
        seq_printf(m, "cpu fifo state:\n");
-       seq_printf(m, "          base: 0x%08x\n", chan->pushbuf_base);
+       seq_printf(m, "          base: 0x%10llx\n", chan->pushbuf_base);
        seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
        seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
        seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
        { "memory", nouveau_debugfs_memory_info, 0, NULL },
        { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
        { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+       { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
index b12fd2c80812d002e0840787e8ff47481a3691a2..3cb52bc52b21101bbdf0a136b1cd4c2a639fb79e 100644 (file)
@@ -32,6 +32,8 @@
 #include "nouveau_hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
+#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
 #include "nv50_display.h"
 
 static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
 int
 nouveau_framebuffer_init(struct drm_device *dev,
                         struct nouveau_framebuffer *nv_fb,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct nouveau_bo *nvbo)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
 
                if (!tile_flags) {
                        if (dev_priv->card_type < NV_D0)
-                               nv_fb->r_pitch = 0x00100000 | fb->pitch;
+                               nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
                        else
-                               nv_fb->r_pitch = 0x01000000 | fb->pitch;
+                               nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
                } else {
                        u32 mode = nvbo->tile_mode;
                        if (dev_priv->card_type >= NV_C0)
                                mode >>= 4;
-                       nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+                       nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
                }
        }
 
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 nouveau_user_framebuffer_create(struct drm_device *dev,
                                struct drm_file *file_priv,
-                               struct drm_mode_fb_cmd *mode_cmd)
+                               struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct nouveau_framebuffer *nouveau_fb;
        struct drm_gem_object *gem;
        int ret;
 
-       gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+       gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
        if (!gem)
                return ERR_PTR(-ENOENT);
 
@@ -147,11 +149,186 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
        return &nouveau_fb->base;
 }
 
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
        .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
+
+struct drm_prop_enum_list {
+       u8 gen_mask;
+       int type;
+       char *name;
+};
+
+static struct drm_prop_enum_list underscan[] = {
+       { 6, UNDERSCAN_AUTO, "auto" },
+       { 6, UNDERSCAN_OFF, "off" },
+       { 6, UNDERSCAN_ON, "on" },
+       {}
+};
+
+static struct drm_prop_enum_list dither_mode[] = {
+       { 7, DITHERING_MODE_AUTO, "auto" },
+       { 7, DITHERING_MODE_OFF, "off" },
+       { 1, DITHERING_MODE_ON, "on" },
+       { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+       { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+       { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+       {}
+};
+
+static struct drm_prop_enum_list dither_depth[] = {
+       { 6, DITHERING_DEPTH_AUTO, "auto" },
+       { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+       { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+       {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do {                                           \
+       struct drm_prop_enum_list *l = (list);                                 \
+       int c = 0;                                                             \
+       while (l->gen_mask) {                                                  \
+               if (l->gen_mask & (1 << (gen)))                                \
+                       c++;                                                   \
+               l++;                                                           \
+       }                                                                      \
+       if (c) {                                                               \
+               p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
+               l = (list);                                                    \
+               c = 0;                                                         \
+               while (p && l->gen_mask) {                                     \
+                       if (l->gen_mask & (1 << (gen))) {                      \
+                               drm_property_add_enum(p, c, l->type, l->name); \
+                               c++;                                           \
+                       }                                                      \
+                       l++;                                                   \
+               }                                                              \
+       }                                                                      \
+} while(0)
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       struct drm_connector *connector;
+       int ret;
+
+       ret = disp->init(dev);
+       if (ret)
+               return ret;
+
+       drm_kms_helper_poll_enable(dev);
+
+       /* enable hotplug interrupts */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+       }
+
+       return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       struct drm_connector *connector;
+
+       /* disable hotplug interrupts */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+       }
+
+       drm_kms_helper_poll_disable(dev);
+       disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       int ret, gen;
+
+       drm_mode_config_init(dev);
+       drm_mode_create_scaling_mode_property(dev);
+       drm_mode_create_dvi_i_properties(dev);
+
+       if (dev_priv->card_type < NV_50)
+               gen = 0;
+       else
+       if (dev_priv->card_type < NV_D0)
+               gen = 1;
+       else
+               gen = 2;
+
+       PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+       PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+       PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+       disp->underscan_hborder_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "underscan hborder", 2);
+       disp->underscan_hborder_property->values[0] = 0;
+       disp->underscan_hborder_property->values[1] = 128;
+
+       disp->underscan_vborder_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "underscan vborder", 2);
+       disp->underscan_vborder_property->values[0] = 0;
+       disp->underscan_vborder_property->values[1] = 128;
+
+       dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+       dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       if (dev_priv->card_type < NV_10) {
+               dev->mode_config.max_width = 2048;
+               dev->mode_config.max_height = 2048;
+       } else
+       if (dev_priv->card_type < NV_50) {
+               dev->mode_config.max_width = 4096;
+               dev->mode_config.max_height = 4096;
+       } else {
+               dev->mode_config.max_width = 8192;
+               dev->mode_config.max_height = 8192;
+       }
+
+       drm_kms_helper_poll_init(dev);
+       drm_kms_helper_poll_disable(dev);
+
+       ret = disp->create(dev);
+       if (ret)
+               return ret;
+
+       if (dev->mode_config.num_crtc) {
+               ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+
+       drm_vblank_cleanup(dev);
+
+       disp->destroy(dev);
+
+       drm_kms_helper_poll_fini(dev);
+       drm_mode_config_cleanup(dev);
+}
+
 int
 nouveau_vblank_enable(struct drm_device *dev, int crtc)
 {
@@ -294,7 +471,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        /* Initialize a page flip struct */
        *s = (struct nouveau_page_flip_state)
                { { }, event, nouveau_crtc(crtc)->index,
-                 fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+                 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
                  new_bo->bo.offset };
 
        /* Choose the channel the flip will be handled in */
@@ -305,7 +482,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Emit a page flip */
        if (dev_priv->card_type >= NV_50) {
-               ret = nv50_display_flip_next(crtc, fb, chan);
+               if (dev_priv->card_type >= NV_D0)
+                       ret = nvd0_display_flip_next(crtc, fb, chan, 0);
+               else
+                       ret = nv50_display_flip_next(crtc, fb, chan);
                if (ret) {
                        nouveau_channel_put(&chan);
                        goto fail_unreserve;
index 00bc6eaad558069a2d59c392124a120e57ddc6e8..4c2e4e5925feb549a71826c81a11342d53167118 100644 (file)
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
  *  -EBUSY if timeout exceeded
  */
 static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 {
-       uint32_t val;
+       uint64_t val;
 
        val = nvchan_rd32(chan, chan->user_get);
+        if (chan->user_get_hi)
+                val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
 
        /* reset counter as long as GET is still advancing, this is
         * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 static int
 nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
 {
-       uint32_t cnt = 0, prev_get = 0;
-       int ret;
+       uint64_t prev_get = 0;
+       int ret, cnt = 0;
 
        ret = nv50_dma_push_wait(chan, slots + 1);
        if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
 int
 nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 {
-       uint32_t prev_get = 0, cnt = 0;
-       int get;
+       uint64_t prev_get = 0;
+       int cnt = 0, get;
 
        if (chan->dma.ib_max)
                return nv50_dma_wait(chan, slots, size);
index de5efe71fefd21d67b063ad951d0aed9b81d23d4..9b93b703ceabaacaf9a3ba1f7326e77d1fe6a773 100644 (file)
@@ -29,6 +29,7 @@
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
 
 /******************************************************************************
  * aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 u8 *
 nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
        struct bit_entry d;
        u8 *table;
        int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
                return NULL;
        }
 
-       table = ROMPTR(bios, d.data[0]);
+       table = ROMPTR(dev, d.data[0]);
        if (!table) {
                NV_ERROR(dev, "displayport table pointer invalid\n");
                return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
        }
 
        for (i = 0; i < table[3]; i++) {
-               *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+               *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
                if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
                        return table;
        }
@@ -336,7 +335,6 @@ struct dp_state {
 static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        int or = dp->or, link = dp->link;
        u8 *entry, sink[2];
        u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
         * table, that has (among other things) pointers to more scripts that
         * need to be executed, this time depending on link speed.
         */
-       entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+       entry = ROMPTR(dev, dp->entry[10]);
        if (entry) {
                if (dp->table[0] < 0x30) {
                        while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
 bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
 {
-       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
 
        dp.dcb = nv_encoder->dcb;
        dp.crtc = nv_crtc->index;
-       dp.auxch = auxch->rd;
+       dp.auxch = auxch->drive;
        dp.or = nv_encoder->or;
        dp.link = !(nv_encoder->dcb->sorconf.link & 1);
        dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
         * we take during link training (DP_SET_POWER is one), we need
         * to ignore them for the moment to avoid races.
         */
-       pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+       nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
 
        /* enable down-spreading, if possible */
        if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
        nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
 
        /* re-enable hotplug detect */
-       pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+       nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
        return true;
 }
 
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
        if (!auxch)
                return false;
 
-       ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+       ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
        if (ret)
                return false;
 
@@ -684,7 +680,7 @@ int
 nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                 uint8_t *data, int data_nr)
 {
-       return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
+       return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
 }
 
 static int
index 9791d13c9e3b8d9d223bd5bb1cae182de4bc89ee..e4a7cfe7898dc455fa4e423828a73ff162ee4ecc 100644 (file)
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
 int nouveau_ctxfw;
 module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
 
 +MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n");
+int nouveau_mxmdcb = 1;
+module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       NV_INFO(dev, "Disabling fbcon acceleration...\n");
-       nouveau_fbcon_save_disable_accel(dev);
+       NV_INFO(dev, "Disabling display...\n");
+       nouveau_display_fini(dev);
+
+       NV_INFO(dev, "Disabling fbcon...\n");
+       nouveau_fbcon_set_suspend(dev, 1);
 
        NV_INFO(dev, "Unpinning framebuffer(s)...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 
                ret = dev_priv->eng[e]->fini(dev, e, true);
                if (ret) {
-                       NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+                       NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
                        goto out_abort;
                }
        }
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                pci_set_power_state(pdev, PCI_D3hot);
        }
 
-       console_lock();
-       nouveau_fbcon_set_suspend(dev, 1);
-       console_unlock();
-       nouveau_fbcon_restore_accel(dev);
        return 0;
 
 out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       nouveau_fbcon_save_disable_accel(dev);
-
        NV_INFO(dev, "We're back, enabling device...\n");
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
        if (ret)
                return ret;
 
-       nouveau_pm_resume(dev);
-
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ret = nouveau_mem_init_agp(dev);
                if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
                }
        }
 
+       nouveau_pm_resume(dev);
+
        NV_INFO(dev, "Restoring mode...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
                        NV_ERROR(dev, "Could not pin/map cursor.\n");
        }
 
-       engine->display.init(dev);
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-               u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+       nouveau_fbcon_set_suspend(dev, 0);
+       nouveau_fbcon_zfill_all(dev);
 
-               nv_crtc->cursor.set_offset(nv_crtc, offset);
-               nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-                                                nv_crtc->cursor_saved_y);
-       }
+       nouveau_display_init(dev);
 
        /* Force CLUT to get re-loaded during modeset */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
                nv_crtc->lut.depth = 0;
        }
 
-       console_lock();
-       nouveau_fbcon_set_suspend(dev, 0);
-       console_unlock();
+       drm_helper_resume_force_mode(dev);
 
-       nouveau_fbcon_zfill_all(dev);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+               u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
-       drm_helper_resume_force_mode(dev);
+               nv_crtc->cursor.set_offset(nv_crtc, offset);
+               nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+                                                nv_crtc->cursor_saved_y);
+       }
 
-       nouveau_fbcon_restore_accel(dev);
        return 0;
 }
 
+static const struct file_operations nouveau_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = nouveau_ttm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#if defined(CONFIG_COMPAT)
+       .compat_ioctl = nouveau_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
                DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,21 +425,7 @@ static struct drm_driver driver = {
        .disable_vblank = nouveau_vblank_disable,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = nouveau_ioctls,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = nouveau_ttm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-               .read = drm_read,
-#if defined(CONFIG_COMPAT)
-               .compat_ioctl = nouveau_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
+       .fops = &nouveau_driver_fops,
        .gem_init_object = nouveau_gem_object_new,
        .gem_free_object = nouveau_gem_object_del,
        .gem_open_object = nouveau_gem_object_open,
index 4c0be3a4ed882f5430ea628ba4d80fef1f1d3690..38134a9c75780a16ab4f646aa978f4d85719ecea 100644 (file)
@@ -163,6 +163,9 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_COPY0     3
 #define NVOBJ_ENGINE_COPY1     4
 #define NVOBJ_ENGINE_MPEG      5
+#define NVOBJ_ENGINE_PPP       NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP       6
+#define NVOBJ_ENGINE_VP                7
 #define NVOBJ_ENGINE_DISPLAY   15
 #define NVOBJ_ENGINE_NR                16
 
@@ -229,6 +232,7 @@ struct nouveau_channel {
        /* mapping of the regs controlling the fifo */
        void __iomem *user;
        uint32_t user_get;
+       uint32_t user_get_hi;
        uint32_t user_put;
 
        /* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
        struct nouveau_gpuobj *pushbuf;
        struct nouveau_bo     *pushbuf_bo;
        struct nouveau_vma     pushbuf_vma;
-       uint32_t               pushbuf_base;
+       uint64_t               pushbuf_base;
 
        /* Notifier memory */
        struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
        int (*early_init)(struct drm_device *);
        void (*late_takedown)(struct drm_device *);
        int (*create)(struct drm_device *);
-       int (*init)(struct drm_device *);
        void (*destroy)(struct drm_device *);
+       int (*init)(struct drm_device *);
+       void (*fini)(struct drm_device *);
+
+       struct drm_property *dithering_mode;
+       struct drm_property *dithering_depth;
+       struct drm_property *underscan_property;
+       struct drm_property *underscan_hborder_property;
+       struct drm_property *underscan_vborder_property;
 };
 
 struct nouveau_gpio_engine {
-       void *priv;
-
-       int  (*init)(struct drm_device *);
-       void (*takedown)(struct drm_device *);
-
-       int  (*get)(struct drm_device *, enum dcb_gpio_tag);
-       int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
-       int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
-                            void (*)(void *, int), void *);
-       void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
-                              void (*)(void *, int), void *);
-       bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+       spinlock_t lock;
+       struct list_head isr;
+       int (*init)(struct drm_device *);
+       void (*fini)(struct drm_device *);
+       int (*drive)(struct drm_device *, int line, int dir, int out);
+       int (*sense)(struct drm_device *, int line);
+       void (*irq_enable)(struct drm_device *, int line, bool);
 };
 
 struct nouveau_pm_voltage_level {
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
        u32 copy;
        u32 daemon;
        u32 vdec;
-       u32 unk05;      /* nv50:nva3, roughly.. */
+       u32 dom6;
        u32 unka0;      /* nva3:nvc0 */
        u32 hub01;      /* nvc0- */
        u32 hub06;      /* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
        int nr_timing;
 };
 
+struct nouveau_pm_fan {
+       u32 min_duty;
+       u32 max_duty;
+       u32 pwm_freq;
+};
+
 struct nouveau_pm_engine {
        struct nouveau_pm_voltage voltage;
        struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
        struct nouveau_pm_memtimings memtimings;
        struct nouveau_pm_temp_sensor_constants sensor_constants;
        struct nouveau_pm_threshold_temp threshold_temp;
+       struct nouveau_pm_fan fan;
+       u32 pwm_divisor;
 
        struct nouveau_pm_level boot;
        struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
        struct device *hwmon;
        struct notifier_block acpi_nb;
 
-       int (*clock_get)(struct drm_device *, u32 id);
-       void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
-                          u32 id, int khz);
-       void (*clock_set)(struct drm_device *, void *);
-
        int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
        void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-       void (*clocks_set)(struct drm_device *, void *);
+       int (*clocks_set)(struct drm_device *, void *);
 
        int (*voltage_get)(struct drm_device *);
        int (*voltage_set)(struct drm_device *, int voltage);
-       int (*fanspeed_get)(struct drm_device *);
-       int (*fanspeed_set)(struct drm_device *, int fanspeed);
+       int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+       int (*pwm_set)(struct drm_device *, int line, u32, u32);
        int (*temp_get)(struct drm_device *);
 };
 
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
        struct nouveau_vm *chan_vm;
 
        struct nvbios vbios;
+       u8 *mxms;
+       struct list_head i2c_ports;
 
        struct nv04_mode_state mode_reg;
        struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
 extern int nouveau_msi;
 extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
 extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
                                           uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+                                              unsigned long size,
+                                              uint32_t page_flags,
+                                              struct page *dummy_read_page);
 
 /* nouveau_debugfs.c */
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1072,8 +1086,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
 extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
                                        struct dcb_entry *, int crtc);
 extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
-                                                     enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
 extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1103,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
                            enum LVDS_script, int pxclk);
 bool bios_encoder_match(struct dcb_entry *, u32 hash);
 
+/* nouveau_mxm.c */
+int  nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
 /* nouveau_ttm.c */
 int nouveau_ttm_global_init(struct drm_nouveau_private *);
 void nouveau_ttm_global_release(struct drm_nouveau_private *);
 int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
 
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
 /* nouveau_dp.c */
 int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                     uint8_t *data, int data_nr);
@@ -1222,6 +1241,9 @@ extern int  nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
 /* nv84_crypt.c */
 extern int  nv84_crypt_create(struct drm_device *);
 
+/* nv98_crypt.c */
+extern int  nv98_crypt_create(struct drm_device *dev);
+
 /* nva3_copy.c */
 extern int  nva3_copy_create(struct drm_device *dev);
 
@@ -1234,6 +1256,17 @@ extern int  nv31_mpeg_create(struct drm_device *dev);
 /* nv50_mpeg.c */
 extern int  nv50_mpeg_create(struct drm_device *dev);
 
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int  nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int  nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int  nv98_ppp_create(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1344,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
 extern int nv04_display_early_init(struct drm_device *);
 extern void nv04_display_late_takedown(struct drm_device *);
 extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
 extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
 
 /* nvd0_display.c */
 extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
 extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+                          struct nouveau_channel *, u32 swap_interval);
 
 /* nv04_crtc.c */
 extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,6 +1451,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
                                  struct drm_file *);
 
 /* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
 int nouveau_vblank_enable(struct drm_device *dev, int crtc);
 void nouveau_vblank_disable(struct drm_device *dev, int crtc);
 int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -1426,23 +1469,22 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
                                 uint32_t handle);
 
 /* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
 void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
-                           void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
-                             void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
                  int *N1, int *M1, int *N2, int *M2, int *P);
 int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1565,6 +1607,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 #define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
 #define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
 #define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do {                                       \
+       static int _warned = 0;                                                \
+       if (!_warned) {                                                        \
+               NV_WARN(d, fmt, ##arg);                                        \
+               _warned = 1;                                                   \
+       }                                                                      \
+} while(0)
 
 /* nouveau_reg_debug bitmask */
 enum {
index 95c843e684bbaf1aec87607405f9bfc1cbbd842e..f3fb649fe45426cd509e3d7a990346c1339921a2 100644 (file)
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
        return container_of(fb, struct nouveau_framebuffer, base);
 }
 
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
 int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-                            struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+                            struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
index 3a4cc32b9e44c44f92fab174792778def49197f4..9892218d7452b0deaa48ef03c46fb7bbecf9b372 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/screen_info.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/console.h>
 
 #include "drmP.h"
 #include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        struct nouveau_framebuffer *nouveau_fb;
        struct nouveau_channel *chan;
        struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct pci_dev *pdev = dev->pdev;
        struct device *device = &pdev->dev;
        int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
-       mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+       mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = roundup(size, PAGE_SIZE);
 
        ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
        info->screen_size = size;
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
        /* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
 void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       console_lock();
+       if (state == 0)
+               nouveau_fbcon_save_disable_accel(dev);
        fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+       if (state == 1)
+               nouveau_fbcon_restore_accel(dev);
+       console_unlock();
 }
 
 void nouveau_fbcon_zfill_all(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644 (file)
index 0000000..a580cc6
--- /dev/null
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_gpio.h"
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+       u8 *dcb = dcb_table(dev);
+       if (dcb) {
+               if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+                       return ROMPTR(dev, dcb[0x0a]);
+               if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+                       return ROMPTR(dev, dcb[-15]);
+       }
+       return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+       u8 *table = dcb_gpio_table(dev);
+       if (table) {
+               *version = table[0];
+               if (*version < 0x30 && ent < table[2])
+                       return table + 3 + (ent * table[1]);
+               else if (ent < table[2])
+                       return table + table[1] + (ent * table[3]);
+       }
+       return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+                 struct gpio_func *gpio)
+{
+       u8 *table, *entry, version;
+       int i = -1;
+
+       if (line == 0xff && func == 0xff)
+               return -EINVAL;
+
+       while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+               if (version < 0x40) {
+                       u16 data = ROM16(entry[0]);
+                       *gpio = (struct gpio_func) {
+                               .line = (data & 0x001f) >> 0,
+                               .func = (data & 0x07e0) >> 5,
+                               .log[0] = (data & 0x1800) >> 11,
+                               .log[1] = (data & 0x6000) >> 13,
+                       };
+               } else
+               if (version < 0x41) {
+                       *gpio = (struct gpio_func) {
+                               .line = entry[0] & 0x1f,
+                               .func = entry[1],
+                               .log[0] = (entry[3] & 0x18) >> 3,
+                               .log[1] = (entry[3] & 0x60) >> 5,
+                       };
+               } else {
+                       *gpio = (struct gpio_func) {
+                               .line = entry[0] & 0x3f,
+                               .func = entry[1],
+                               .log[0] = (entry[4] & 0x30) >> 4,
+                               .log[1] = (entry[4] & 0xc0) >> 6,
+                       };
+               }
+
+               if ((line == 0xff || line == gpio->line) &&
+                   (func == 0xff || func == gpio->func))
+                       return 0;
+       }
+
+       /* DCB 2.2, fixed TVDAC GPIO data */
+       if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+               if (func == DCB_GPIO_TVDAC0) {
+                       *gpio = (struct gpio_func) {
+                               .func = DCB_GPIO_TVDAC0,
+                               .line = table[-4] >> 4,
+                               .log[0] = !!(table[-5] & 2),
+                               .log[1] =  !(table[-5] & 2),
+                       };
+                       return 0;
+               }
+       }
+
+       /* Apple iMac G4 NV18 */
+       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+               if (func == DCB_GPIO_TVDAC0) {
+                       *gpio = (struct gpio_func) {
+                               .func = DCB_GPIO_TVDAC0,
+                               .line = 4,
+                               .log[0] = 0,
+                               .log[1] = 1,
+                       };
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               int dir = !!(gpio.log[state] & 0x02);
+               int out = !!(gpio.log[state] & 0x01);
+               ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+       }
+
+       return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               ret = nouveau_gpio_sense(dev, idx, gpio.line);
+               if (ret >= 0)
+                       ret = (ret == (gpio.log[1] & 1));
+       }
+
+       return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               if (idx == 0 && pgpio->irq_enable)
+                       pgpio->irq_enable(dev, gpio.line, on);
+               else
+                       ret = -ENODEV;
+       }
+
+       return ret;
+}
+
+struct gpio_isr {
+       struct drm_device *dev;
+       struct list_head head;
+       struct work_struct work;
+       int idx;
+       struct gpio_func func;
+       void (*handler)(void *, int);
+       void *data;
+       bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+       struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+       struct drm_device *dev = isr->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       unsigned long flags;
+       int state;
+
+       state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+       if (state >= 0)
+               isr->handler(isr->data, state);
+
+       spin_lock_irqsave(&pgpio->lock, flags);
+       isr->inhibit = false;
+       spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr;
+
+       if (idx != 0)
+               return;
+
+       spin_lock(&pgpio->lock);
+       list_for_each_entry(isr, &pgpio->isr, head) {
+               if (line_mask & (1 << isr->func.line)) {
+                       if (isr->inhibit)
+                               continue;
+                       isr->inhibit = true;
+                       schedule_work(&isr->work);
+               }
+       }
+       spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+                    void (*handler)(void *, int), void *data)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr;
+       unsigned long flags;
+       int ret;
+
+       isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+       if (!isr)
+               return -ENOMEM;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+       if (ret) {
+               kfree(isr);
+               return ret;
+       }
+
+       INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+       isr->dev = dev;
+       isr->handler = handler;
+       isr->data = data;
+       isr->idx = idx;
+
+       spin_lock_irqsave(&pgpio->lock, flags);
+       list_add(&isr->head, &pgpio->isr);
+       spin_unlock_irqrestore(&pgpio->lock, flags);
+       return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+                    void (*handler)(void *, int), void *data)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr, *tmp;
+       struct gpio_func func;
+       unsigned long flags;
+       LIST_HEAD(tofree);
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+       if (ret == 0) {
+               spin_lock_irqsave(&pgpio->lock, flags);
+               list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+                       if (memcmp(&isr->func, &func, sizeof(func)) ||
+                           isr->idx != idx ||
+                           isr->handler != handler || isr->data != data)
+                               continue;
+                       list_move(&isr->head, &tofree);
+               }
+               spin_unlock_irqrestore(&pgpio->lock, flags);
+
+               list_for_each_entry_safe(isr, tmp, &tofree, head) {
+                       flush_work_sync(&isr->work);
+                       kfree(isr);
+               }
+       }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       INIT_LIST_HEAD(&pgpio->isr);
+       spin_lock_init(&pgpio->lock);
+
+       return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       nouveau_gpio_fini(dev);
+       BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       int ret = 0;
+
+       if (pgpio->init)
+               ret = pgpio->init(dev);
+
+       return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       if (pgpio->fini)
+               pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u8 *entry, version;
+       int ent = -1;
+
+       while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+               u8 func = 0xff, line, defs, unk0, unk1;
+               if (version >= 0x41) {
+                       defs = !!(entry[0] & 0x80);
+                       line = entry[0] & 0x3f;
+                       func = entry[1];
+                       unk0 = entry[2];
+                       unk1 = entry[3] & 0x1f;
+               } else
+               if (version >= 0x40) {
+                       line = entry[0] & 0x1f;
+                       func = entry[1];
+                       defs = !!(entry[3] & 0x01);
+                       unk0 = !!(entry[3] & 0x02);
+                       unk1 = !!(entry[3] & 0x04);
+               } else {
+                       break;
+               }
+
+               if (func == 0xff)
+                       continue;
+
+               nouveau_gpio_func_set(dev, func, defs);
+
+               if (dev_priv->card_type >= NV_D0) {
+                       nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+                       if (unk1--)
+                               nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+               } else
+               if (dev_priv->card_type >= NV_50) {
+                       static const u32 regs[] = { 0xe100, 0xe28c };
+                       u32 val = (unk1 << 16) | unk0;
+                       u32 reg = regs[line >> 4]; line &= 0x0f;
+
+                       nv_mask(dev, reg, 0x00010001 << line, val << line);
+               }
+       }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644 (file)
index 0000000..64c5cb0
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+/* Decoded DCB GPIO table entry, filled in by nouveau_gpio_find(). */
+struct gpio_func {
+       u8 func;        /* function tag this GPIO is assigned to */
+       u8 line;        /* GPIO line number */
+       u8 log[2];      /* per-state values; presumably logic levels for
+                        * states 0/1 -- confirm against nouveau_gpio.c */
+};
+
+/* nouveau_gpio.c */
+int  nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int  nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int  nouveau_gpio_drive(struct drm_device *, int idx, int line,
+                       int dir, int out);
+int  nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int  nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+                      struct gpio_func *);
+int  nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int  nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int  nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int  nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+                         void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+                         void (*)(void *, int state), void *data);
+
+/* True if the GPIO tables define function 'tag' (on any line; the 0xff
+ * line argument is presumably a wildcard -- see nouveau_gpio_find()).
+ */
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+       struct gpio_func func;
+       return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+/* Set function 'tag' to logic 'state'. */
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+       return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+/* Read the current logic state of function 'tag'. */
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+       return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644 (file)
index 0000000..59ea1c1
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+/* HDMI registers live in the per-SOR block on chipset 0xa3 and newer;
+ * earlier chips keep them in a per-CRTC block instead.
+ */
+static bool
+hdmi_sor(struct drm_encoder *encoder)
+{
+       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+       if (dev_priv->chipset < 0xa3)
+               return false;
+       return true;
+}
+
+/* Base address of this encoder's HDMI register block. */
+static inline u32
+hdmi_base(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       if (!hdmi_sor(encoder))
+               return 0x616500 + (nv_crtc->index * 0x800);
+       return 0x61c500 + (nv_encoder->or * 0x800);
+}
+
+/* Register accessors relative to the encoder's HDMI block base. */
+static void
+hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
+{
+       nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+}
+
+static u32
+hdmi_rd32(struct drm_encoder *encoder, u32 reg)
+{
+       return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+}
+
+/* Read-modify-write; returns the register's previous value. */
+static u32
+hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
+{
+       u32 tmp = hdmi_rd32(encoder, reg);
+       hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
+       return tmp;
+}
+
+/* Disable audio output: clear bits 1:0 of the SOR audio control
+ * register.  Nothing to do here on pre-SOR (chipset < 0xa3) hardware.
+ */
+static void
+nouveau_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       u32 or = nv_encoder->or * 0x800;
+
+       if (hdmi_sor(encoder)) {
+               nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
+       }
+}
+
+/* Enable audio and upload the monitor's ELD when its EDID advertises
+ * audio support; otherwise tear audio down again.
+ */
+static void
+nouveau_audio_mode_set(struct drm_encoder *encoder,
+                      struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       u32 or = nv_encoder->or * 0x800;
+       int i;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid)) {
+               nouveau_audio_disconnect(encoder);
+               return;
+       }
+
+       if (hdmi_sor(encoder)) {
+               nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+
+               drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+               if (nv_connector->base.eld[0]) {
+                       u8 *eld = nv_connector->base.eld;
+                       /* upload the ELD, zero-pad to 0x60 bytes, then
+                        * flag it as valid
+                        */
+                       for (i = 0; i < eld[2] * 4; i++)
+                               nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+                       for (i = eld[2] * 4; i < 0x60; i++)
+                               nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
+                       nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+               }
+       }
+}
+
+/* Checksum and upload an infoframe to the control block at 'ctrl',
+ * then enable it.  frame[2] holds the payload length; frame[3]
+ * receives the checksum (256 minus the sum of the first frame[2]
+ * bytes).
+ */
+static void
+nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
+{
+       /* calculate checksum for the infoframe */
+       u8 sum = 0, i;
+       for (i = 0; i < frame[2]; i++)
+               sum += frame[i];
+       frame[3] = 256 - sum;
+
+       /* disable infoframe, and write header */
+       hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
+       hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
+
+       /* register scans tell me the audio infoframe has only one set of
+        * subpack regs, according to tegra (gee nvidia, it'd be nice if we
+        * could get those docs too!), the hdmi block pads out the rest of
+        * the packet on its own.
+        */
+       if (ctrl == 0x020)
+               frame[2] = 6;
+
+       /* write out checksum and data, weird weird 7 byte register pairs */
+       for (i = 0; i < frame[2] + 1; i += 7) {
+               u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
+               u32 *subpack = (u32 *)&frame[3 + i];
+               hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
+               hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
+       }
+
+       /* enable the infoframe */
+       hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
+}
+
+/* Build a minimal AVI infoframe (type 0x82, version 2, length 13) with
+ * every colorimetry, bar and VIC field zeroed, and write it to the
+ * 0x020 control block.
+ */
+static void
+nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
+                            struct drm_display_mode *mode)
+{
+       const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
+       const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
+       const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
+       u8 frame[20];
+
+       frame[0x00] = 0x82; /* AVI infoframe */
+       frame[0x01] = 0x02; /* version */
+       frame[0x02] = 0x0d; /* length */
+       frame[0x03] = 0x00; /* checksum, filled by nouveau_hdmi_infoframe() */
+       frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
+       frame[0x05] = (C << 6) | (M << 4) | R;
+       frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
+       frame[0x07] = VIC;
+       frame[0x08] = PR;
+       frame[0x09] = bar_top & 0xff;
+       frame[0x0a] = bar_top >> 8;
+       frame[0x0b] = bar_bottom & 0xff;
+       frame[0x0c] = bar_bottom >> 8;
+       frame[0x0d] = bar_left & 0xff;
+       frame[0x0e] = bar_left >> 8;
+       frame[0x0f] = bar_right & 0xff;
+       frame[0x10] = bar_right >> 8;
+       frame[0x11] = 0x00;
+       frame[0x12] = 0x00;
+       frame[0x13] = 0x00;
+
+       nouveau_hdmi_infoframe(encoder, 0x020, frame);
+}
+
+/* Build a basic audio infoframe (type 0x84, version 1, length 10) with
+ * all fields at their zero/default values, and write it to the 0x000
+ * control block.
+ */
+static void
+nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
+                            struct drm_display_mode *mode)
+{
+       const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
+       const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
+       u8 frame[12];
+
+       frame[0x00] = 0x84;     /* Audio infoframe */
+       frame[0x01] = 0x01;     /* version */
+       frame[0x02] = 0x0a;     /* length */
+       frame[0x03] = 0x00;     /* checksum, filled by nouveau_hdmi_infoframe() */
+       frame[0x04] = (CT << 4) | CC;
+       frame[0x05] = (SF << 2) | ceaSS;
+       frame[0x06] = FMT;
+       frame[0x07] = CA;
+       frame[0x08] = (DM_INH << 7) | (LSV << 3);
+       frame[0x09] = 0x00;
+       frame[0x0a] = 0x00;
+       frame[0x0b] = 0x00;
+
+       nouveau_hdmi_infoframe(encoder, 0x000, frame);
+}
+
+/* Tear down HDMI on this encoder: audio, both infoframes, and finally
+ * the HDMI function itself.
+ */
+static void
+nouveau_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       nouveau_audio_disconnect(encoder);
+
+       /* disable audio and avi infoframes */
+       hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
+       hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
+
+       /* disable hdmi */
+       hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
+}
+
+/* Configure HDMI output for 'mode' on this encoder, or disable it
+ * entirely when there is no mode or the sink is not an HDMI monitor.
+ */
+void
+nouveau_hdmi_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       u32 max_ac_packet, rekey;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!mode || !nv_connector || !nv_connector->edid ||
+           !drm_detect_hdmi_monitor(nv_connector->edid)) {
+               nouveau_hdmi_disconnect(encoder);
+               return;
+       }
+
+       nouveau_hdmi_video_infoframe(encoder, mode);
+       nouveau_hdmi_audio_infoframe(encoder, mode);
+
+       hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+       hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+       hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+       nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* value matches nvidia binary driver, and tegra constant */
+       rekey = 56;
+
+       /* max_ac_packet derived from horizontal blanking minus the rekey
+        * and a fixed overhead, in units of 32 pixels
+        */
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       /* enable hdmi */
+       hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
+                                             0x1f000000 | /* unknown */
+                                             max_ac_packet << 16 |
+                                             rekey);
+
+       nouveau_audio_mode_set(encoder, mode);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644 (file)
index 0000000..6976875
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+/* In-memory buffer for assembling HWSQ microcode. */
+struct hwsq_ucode {
+       u8 data[0x200];         /* opcode buffer */
+       union {                 /* write cursor, viewed at various widths */
+               u8  *u08;
+               u16 *u16;
+               u32 *u32;
+       } ptr;
+       u16 len;                /* total bytes, set by hwsq_fini() */
+
+       u32 reg;                /* last register written, for short-form opcodes */
+       u32 val;                /* last value written, for short-form opcodes */
+};
+
+/* Reset the write cursor and invalidate the reg/val caches. */
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+       hwsq->ptr.u08 = hwsq->data;
+       hwsq->reg = 0xffffffff;
+       hwsq->val = 0xffffffff;
+}
+
+/* Pad with 0x7f opcodes up to a 4-byte boundary, record the final
+ * length, and rewind the cursor to the start of the buffer.
+ */
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+       do {
+               *hwsq->ptr.u08++ = 0x7f;
+               hwsq->len = hwsq->ptr.u08 - hwsq->data;
+       } while (hwsq->len & 3);
+       hwsq->ptr.u08 = hwsq->data;
+}
+
+/* Emit a delay opcode: the value is encoded as a 2-bit base with a
+ * power-of-four shift packed into a single byte.
+ */
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+       u32 shift = 0;
+       while (usec & ~3) {
+               usec >>= 2;
+               shift++;
+       }
+
+       *hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+/* Emit a flag opcode: 0x80 + flag, plus 0x20 for val == 0 or 0x40 for
+ * val >= 1 (val < 0 adds nothing).
+ */
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+       flag += 0x80;
+       if (val >= 0)
+               flag += 0x20;
+       if (val >= 1)
+               flag += 0x20;
+       *hwsq->ptr.u08++ = flag;
+}
+
+/* Emit opcode 0x5f with two operand bytes. */
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+       *hwsq->ptr.u08++ = 0x5f;
+       *hwsq->ptr.u08++ = v0;
+       *hwsq->ptr.u08++ = v1;
+}
+
+/* Emit a register write.  Value and register each use a short 16-bit
+ * form (opcodes 0x42/0x40) when their upper halves match the previous
+ * write, otherwise the full 32-bit form (0xe2/0xe0).  A value equal to
+ * the previous one is not re-emitted at all.
+ */
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+       if (val != hwsq->val) {
+               if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+                       *hwsq->ptr.u08++ = 0x42;
+                       *hwsq->ptr.u16++ = (val & 0x0000ffff);
+               } else {
+                       *hwsq->ptr.u08++ = 0xe2;
+                       *hwsq->ptr.u32++ = val;
+               }
+
+               hwsq->val = val;
+       }
+
+       if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+               *hwsq->ptr.u08++ = 0x40;
+               *hwsq->ptr.u16++ = (reg & 0x0000ffff);
+       } else {
+               *hwsq->ptr.u08++ = 0xe0;
+               *hwsq->ptr.u32++ = reg;
+       }
+       hwsq->reg = reg;
+}
+
+#endif
index d39b2202b197521473129d8c5e3cf1ccd6d34031..820ae7f520448086c388caa6cb5da00c8096f180 100644 (file)
 #include "nouveau_i2c.h"
 #include "nouveau_hw.h"
 
+/* bit-bang timing, in nanoseconds */
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
+
 static void
-nv04_i2c_setscl(void *data, int state)
+i2c_drive_scl(void *data, int state)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
-       NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+       struct nouveau_i2c_chan *port = data;
+       /* port->type selects the register interface:
+        * 0 = VGA CRTC, 4 = NV4E mmio, 5 = NV50+ mmio
+        */
+       if (port->type == 0) {
+               u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+               if (state) val |= 0x20;
+               else       val &= 0xdf;
+               NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+       } else
+       if (port->type == 4) {
+               nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+       } else
+       if (port->type == 5) {
+               if (state) port->state |= 0x01;
+               else       port->state &= 0xfe;
+               nv_wr32(port->dev, port->drive, 4 | port->state);
+       }
 }
 
 static void
-nv04_i2c_setsda(void *data, int state)
+i2c_drive_sda(void *data, int state)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
-       NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+       struct nouveau_i2c_chan *port = data;
+       /* same register interfaces as i2c_drive_scl(), SDA bits instead */
+       if (port->type == 0) {
+               u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+               if (state) val |= 0x10;
+               else       val &= 0xef;
+               NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+       } else
+       if (port->type == 4) {
+               nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+       } else
+       if (port->type == 5) {
+               if (state) port->state |= 0x02;
+               else       port->state &= 0xfd;
+               nv_wr32(port->dev, port->drive, 4 | port->state);
+       }
 }
 
 static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+       struct nouveau_i2c_chan *port = data;
+       struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+       /* read back the SCL line; NVD0+ moved the sense bits */
+       if (port->type == 0) {
+               return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+       } else
+       if (port->type == 4) {
+               return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+       } else
+       if (port->type == 5) {
+               if (dev_priv->card_type < NV_D0)
+                       return !!(nv_rd32(port->dev, port->sense) & 0x01);
+               else
+                       return !!(nv_rd32(port->dev, port->sense) & 0x10);
+       }
+       return 0;
 }
 
 static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+       struct nouveau_i2c_chan *port = data;
+       struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+       /* read back the SDA line; NVD0+ moved the sense bits */
+       if (port->type == 0) {
+               return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+       } else
+       if (port->type == 4) {
+               return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+       } else
+       if (port->type == 5) {
+               if (dev_priv->card_type < NV_D0)
+                       return !!(nv_rd32(port->dev, port->sense) & 0x02);
+               else
+                       return !!(nv_rd32(port->dev, port->sense) & 0x20);
+       }
+       return 0;
 }
 
 static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
-       nv_wr32(dev, i2c->wr, val | 0x01);
+       /* nanosecond delay, rounded to the nearest microsecond */
+       udelay((nsec + 500) / 1000);
 }
 
-static void
-nv4e_i2c_setsda(void *data, int state)
+/* Drive SCL high, then poll (up to T_TIMEOUT) until the line actually
+ * reads back high -- a slave may be holding it low.  Returns false on
+ * timeout.
+ */
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
+       u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+       i2c_drive_scl(port, 1);
+       do {
+               i2c_delay(port, T_RISEFALL);
+       } while (!i2c_sense_scl(port) && --timeout);
 
-       val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
-       nv_wr32(dev, i2c->wr, val | 0x01);
+       return timeout != 0;
 }
 
 static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       int ret = 0;
+
+       /* snapshot the current line state, and if the bus isn't idle
+        * (both lines high) try to bring it there first
+        */
+       port->state  = i2c_sense_scl(port);
+       port->state |= i2c_sense_sda(port) << 1;
+       if (port->state != 3) {
+               i2c_drive_scl(port, 0);
+               i2c_drive_sda(port, 1);
+               if (!i2c_raise_scl(port))
+                       ret = -EBUSY;
+       }
 
-       return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+       /* start condition: SDA falls while SCL is high */
+       i2c_drive_sda(port, 0);
+       i2c_delay(port, T_HOLD);
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return ret;
 }
 
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+       /* stop condition: SDA rises while SCL is high */
+       i2c_drive_scl(port, 0);
+       i2c_drive_sda(port, 0);
+       i2c_delay(port, T_RISEFALL);
+
+       i2c_drive_scl(port, 1);
+       i2c_delay(port, T_HOLD);
+       i2c_drive_sda(port, 1);
+       i2c_delay(port, T_HOLD);
 }
 
-static const uint32_t nv50_i2c_port[] = {
-       0x00e138, 0x00e150, 0x00e168, 0x00e180,
-       0x00e254, 0x00e274, 0x00e764, 0x00e780,
-       0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
+/* Clock one bit out on SDA.  Returns 0, or -ETIMEDOUT if SCL could not
+ * be raised.
+ */
 static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       i2c_drive_sda(port, sda);
+       i2c_delay(port, T_RISEFALL);
 
-       return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+       if (!i2c_raise_scl(port))
+               return -ETIMEDOUT;
+       i2c_delay(port, T_HOLD);
 
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return 0;
+}
 
+/* Clock one bit in from SDA.  Returns the bit value (0/1), or
+ * -ETIMEDOUT if SCL could not be raised.
+ */
 static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       int sda;
+
+       i2c_drive_sda(port, 1);
+       i2c_delay(port, T_RISEFALL);
 
-       return !!(nv_rd32(dev, i2c->rd) & 2);
+       if (!i2c_raise_scl(port))
+               return -ETIMEDOUT;
+       i2c_delay(port, T_HOLD);
+
+       sda = i2c_sense_sda(port);
+
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return sda;
 }
 
-static void
-nv50_i2c_setscl(void *data, int state)
+/* Read one byte MSB-first, then send ACK (0) or, for the final byte,
+ * NAK (1).
+ */
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
 {
-       struct nouveau_i2c_chan *i2c = data;
+       int i, bit;
+
+       *byte = 0;
+       for (i = 7; i >= 0; i--) {
+               bit = i2c_bitr(port);
+               if (bit < 0)
+                       return bit;
+               *byte |= bit << i;
+       }
 
-       nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+       return i2c_bitw(port, last ? 1 : 0);
 }
 
-static void
-nv50_i2c_setsda(void *data, int state)
+/* Write one byte MSB-first and read back the acknowledge bit; a NAK
+ * maps to -EIO.
+ */
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
 {
-       struct nouveau_i2c_chan *i2c = data;
+       int i, ret;
+       for (i = 7; i >= 0; i--) {
+               ret = i2c_bitw(port, !!(byte & (1 << i)));
+               if (ret < 0)
+                       return ret;
+       }
 
-       nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
-       i2c->data = state;
+       ret = i2c_bitr(port);
+       if (ret == 1) /* nack */
+               ret = -EIO;
+       return ret;
 }
 
 static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+       /* send the 7-bit address with the R/W bit in the LSB */
+       u32 addr = msg->addr << 1;
+       if (msg->flags & I2C_M_RD)
+               addr |= 1;
+       return i2c_put_byte(port, addr);
 }
 
+/* i2c_algorithm.master_xfer implementation: for each message, issue a
+ * (repeated) start and address, then transfer the payload; a single
+ * stop is sent once all messages are done.  Returns the number of
+ * messages handled, or a negative error code.
+ */
 static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+       struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+       struct i2c_msg *msg = msgs;
+       int ret = 0, mcnt = num;
+
+       while (!ret && mcnt--) {
+               u8 remaining = msg->len;
+               u8 *ptr = msg->buf;
+
+               ret = i2c_start(port);
+               if (ret == 0)
+                       ret = i2c_addr(port, msg);
+
+               if (msg->flags & I2C_M_RD) {
+                       while (!ret && remaining--)
+                               ret = i2c_get_byte(port, ptr++, !remaining);
+               } else {
+                       while (!ret && remaining--)
+                               ret = i2c_put_byte(port, *ptr++);
+               }
+
+               msg++;
+       }
+
+       i2c_stop(port);
+       return (ret < 0) ? ret : num;
 }
 
-int
-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_i2c_chan *i2c;
-       int ret;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm i2c_bit_algo = {
+       .master_xfer = i2c_bit_xfer,
+       .functionality = i2c_bit_func
+};
+
+/* register offsets for NV50-era i2c ports, indexed by DCB drive value */
+static const uint32_t nv50_i2c_port[] = {
+       0x00e138, 0x00e150, 0x00e168, 0x00e180,
+       0x00e254, 0x00e274, 0x00e764, 0x00e780,
+       0x00e79c, 0x00e7b8
+};
 
-       if (entry->chan)
-               return -EEXIST;
+/* Locate the VBIOS i2c port table via the DCB (DCB 1.5+: pointer at
+ * dcb[2]; DCB 3.0+: pointer at dcb[4]).  Returns NULL when absent, and
+ * reports the table version through *version.
+ */
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+       u8 *dcb = dcb_table(dev), *i2c = NULL;
+       if (dcb) {
+               if (dcb[0] >= 0x15)
+                       i2c = ROMPTR(dev, dcb[2]);
+               if (dcb[0] >= 0x30)
+                       i2c = ROMPTR(dev, dcb[4]);
+       }
 
-       if (dev_priv->card_type >= NV_50 &&
-           dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
-               NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
-               return -EINVAL;
+       /* early revisions had no version number, use dcb version */
+       if (i2c) {
+               *version = dcb[0];
+               if (*version >= 0x30)
+                       *version = i2c[0];
        }
 
-       i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
-       if (i2c == NULL)
-               return -ENOMEM;
-
-       switch (entry->port_type) {
-       case 0:
-               i2c->bit.setsda = nv04_i2c_setsda;
-               i2c->bit.setscl = nv04_i2c_setscl;
-               i2c->bit.getsda = nv04_i2c_getsda;
-               i2c->bit.getscl = nv04_i2c_getscl;
-               i2c->rd = entry->read;
-               i2c->wr = entry->write;
-               break;
-       case 4:
-               i2c->bit.setsda = nv4e_i2c_setsda;
-               i2c->bit.setscl = nv4e_i2c_setscl;
-               i2c->bit.getsda = nv4e_i2c_getsda;
-               i2c->bit.getscl = nv4e_i2c_getscl;
-               i2c->rd = 0x600800 + entry->read;
-               i2c->wr = 0x600800 + entry->write;
-               break;
-       case 5:
-               i2c->bit.setsda = nv50_i2c_setsda;
-               i2c->bit.setscl = nv50_i2c_setscl;
-               if (dev_priv->card_type < NV_D0) {
-                       i2c->bit.getsda = nv50_i2c_getsda;
-                       i2c->bit.getscl = nv50_i2c_getscl;
-                       i2c->rd = nv50_i2c_port[entry->read];
-                       i2c->wr = i2c->rd;
-               } else {
-                       i2c->bit.getsda = nvd0_i2c_getsda;
-                       i2c->bit.getscl = nvd0_i2c_getscl;
-                       i2c->rd = 0x00d014 + (entry->read * 0x20);
-                       i2c->wr = i2c->rd;
-               }
-               break;
-       case 6:
-               i2c->rd = entry->read;
-               i2c->wr = entry->write;
-               break;
-       default:
-               NV_ERROR(dev, "DCB I2C port type %d unknown\n",
-                        entry->port_type);
-               kfree(i2c);
-               return -EINVAL;
+       return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct nouveau_i2c_chan *port;
+       u8 *i2c, *entry, legacy[2][4] = {};
+       u8 version, entries, recordlen;
+       int ret, i;
+
+       INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+       i2c = i2c_table(dev, &version);
+       if (!i2c) {
+               u8 *bmp = &bios->data[bios->offset];
+               if (bios->type != NVBIOS_BMP)
+                       return -ENODEV;
+
+               legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+               legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+               legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+               legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+               /* BMP (from v4.0) has i2c info in the structure, it's in a
+                * fixed location on earlier VBIOS
+                */
+               if (bmp[5] < 4)
+                       i2c = &bios->data[0x48];
+               else
+                       i2c = &bmp[0x36];
+
+               if (i2c[4]) legacy[0][0] = i2c[4];
+               if (i2c[5]) legacy[0][1] = i2c[5];
+               if (i2c[6]) legacy[1][0] = i2c[6];
+               if (i2c[7]) legacy[1][1] = i2c[7];
        }
 
-       snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
-                "nouveau-%s-%d", pci_name(dev->pdev), index);
-       i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.dev.parent = &dev->pdev->dev;
-       i2c->dev = dev;
-       i2c_set_adapdata(&i2c->adapter, i2c);
-
-       if (entry->port_type < 6) {
-               i2c->adapter.algo_data = &i2c->bit;
-               i2c->bit.udelay = 40;
-               i2c->bit.timeout = usecs_to_jiffies(5000);
-               i2c->bit.data = i2c;
-               ret = i2c_bit_add_bus(&i2c->adapter);
+       if (i2c && version >= 0x30) {
+               entry     = i2c[1] + i2c;
+               entries   = i2c[2];
+               recordlen = i2c[3];
+       } else
+       if (i2c) {
+               entry     = i2c;
+               entries   = 16;
+               recordlen = 4;
        } else {
-               i2c->adapter.algo = &nouveau_dp_i2c_algo;
-               ret = i2c_add_adapter(&i2c->adapter);
+               entry     = legacy[0];
+               entries   = 2;
+               recordlen = 4;
        }
 
-       if (ret) {
-               NV_ERROR(dev, "Failed to register i2c %d\n", index);
-               kfree(i2c);
-               return ret;
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               port = kzalloc(sizeof(*port), GFP_KERNEL);
+               if (port == NULL) {
+                       nouveau_i2c_fini(dev);
+                       return -ENOMEM;
+               }
+
+               port->type = entry[3];
+               if (version < 0x30) {
+                       port->type &= 0x07;
+                       if (port->type == 0x07)
+                               port->type = 0xff;
+               }
+
+               if (port->type == 0xff) {
+                       kfree(port);
+                       continue;
+               }
+
+               switch (port->type) {
+               case 0: /* NV04:NV50 */
+                       port->drive = entry[0];
+                       port->sense = entry[1];
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 4: /* NV4E */
+                       port->drive = 0x600800 + entry[1];
+                       port->sense = port->drive;
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 5: /* NV50- */
+                       port->drive = entry[0] & 0x0f;
+                       if (dev_priv->card_type < NV_D0) {
+                               if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+                                       break;
+                               port->drive = nv50_i2c_port[port->drive];
+                               port->sense = port->drive;
+                       } else {
+                               port->drive = 0x00d014 + (port->drive * 0x20);
+                               port->sense = port->drive;
+                       }
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 6: /* NV50- DP AUX */
+                       port->drive = entry[0];
+                       port->sense = port->drive;
+                       port->adapter.algo = &nouveau_dp_i2c_algo;
+                       break;
+               default:
+                       break;
+               }
+
+               if (!port->adapter.algo) {
+                       NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+                                i, port->type, port->drive, port->sense);
+                       kfree(port);
+                       continue;
+               }
+
+               snprintf(port->adapter.name, sizeof(port->adapter.name),
+                        "nouveau-%s-%d", pci_name(dev->pdev), i);
+               port->adapter.owner = THIS_MODULE;
+               port->adapter.dev.parent = &dev->pdev->dev;
+               port->dev = dev;
+               port->index = i;
+               port->dcb = ROM32(entry[0]);
+               i2c_set_adapdata(&port->adapter, i2c);
+
+               ret = i2c_add_adapter(&port->adapter);
+               if (ret) {
+                       NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+                       kfree(port);
+                       continue;
+               }
+
+               list_add_tail(&port->head, &dev_priv->i2c_ports);
        }
 
-       entry->chan = i2c;
        return 0;
 }
 
 void
-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+nouveau_i2c_fini(struct drm_device *dev)
 {
-       if (!entry->chan)
-               return;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_i2c_chan *port, *tmp;
 
-       i2c_del_adapter(&entry->chan->adapter);
-       kfree(entry->chan);
-       entry->chan = NULL;
+       list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+               i2c_del_adapter(&port->adapter);
+               kfree(port);
+       }
 }
 
 struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, int index)
+nouveau_i2c_find(struct drm_device *dev, u8 index)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
+       struct nouveau_i2c_chan *port;
+
+       if (index == NV_I2C_DEFAULT(0) ||
+           index == NV_I2C_DEFAULT(1)) {
+               u8 version, *i2c = i2c_table(dev, &version);
+               if (i2c && version >= 0x30) {
+                       if (index == NV_I2C_DEFAULT(0))
+                               index = (i2c[4] & 0x0f);
+                       else
+                               index = (i2c[4] & 0xf0) >> 4;
+               } else {
+                       index = 2;
+               }
+       }
 
-       if (index >= DCB_MAX_NUM_I2C_ENTRIES)
-               return NULL;
+       list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+               if (port->index == index)
+                       break;
+       }
 
-       if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
-               uint32_t reg = 0xe500, val;
+       if (&port->head == &dev_priv->i2c_ports)
+               return NULL;
 
-               if (i2c->port_type == 6) {
-                       reg += i2c->read * 0x50;
+       if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+               u32 reg = 0x00e500, val;
+               if (port->type == 6) {
+                       reg += port->drive * 0x50;
                        val  = 0x2002;
                } else {
-                       reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+                       reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
                        val  = 0xe001;
                }
 
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
                nv_mask(dev, reg + 0x00, 0x0000f003, val);
        }
 
-       if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
-               return NULL;
-       return i2c->chan;
+       return port;
 }
 
 bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
        struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
        int i;
 
-       NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+       if (!i2c) {
+               NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+               return -ENODEV;
+       }
 
-       for (i = 0; i2c && info[i].addr; i++) {
+       NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+       for (i = 0; info[i].addr; i++) {
                if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
                    (!match || match(i2c, &info[i]))) {
                        NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
        }
 
        NV_DEBUG(dev, "No devices found.\n");
-
        return -ENODEV;
 }
index 422b62fd8272487dd78bb3d276dbfe70dd8e80fe..4d2e4e9031be6dc8f51882c5c95a933c0e763064 100644 (file)
 #include <linux/i2c-algo-bit.h>
 #include "drm_dp_helper.h"
 
-struct dcb_i2c_entry;
+#define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_PORT_NUM    0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
 
 struct nouveau_i2c_chan {
        struct i2c_adapter adapter;
        struct drm_device *dev;
-       struct i2c_algo_bit_data bit;
-       unsigned rd;
-       unsigned wr;
-       unsigned data;
+       struct list_head head;
+       u8  index;
+       u8  type;
+       u32 dcb;
+       u32 drive;
+       u32 sense;
+       u32 state;
 };
 
-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+int  nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
 bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
 int nouveau_i2c_identify(struct drm_device *dev, const char *what,
                         struct i2c_board_info *info,
index 36bec4807701b2e1afb043496c0dbe4a55d3a5ec..c3a5745e9c7977eda02afa76dd2b0db05b3c9ff9 100644 (file)
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;
+       ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+       if (ret) {
+               /* Reset to default value. */
+               pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+       }
+
 
        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
+                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
                else
                if (P.version == 2)
-                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
                else {
                        NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
                }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644 (file)
index 0000000..8bccddf
--- /dev/null
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
+#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
+
+static u8 *
+mxms_data(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       return dev_priv->mxms;
+
+}
+
+static u16
+mxms_version(struct drm_device *dev)
+{
+       u8 *mxms = mxms_data(dev);
+       u16 version = (mxms[4] << 8) | mxms[5];
+       switch (version ) {
+       case 0x0200:
+       case 0x0201:
+       case 0x0300:
+               return version;
+       default:
+               break;
+       }
+
+       MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
+       return 0x0000;
+}
+
+static u16
+mxms_headerlen(struct drm_device *dev)
+{
+       return 8;
+}
+
+static u16
+mxms_structlen(struct drm_device *dev)
+{
+       return *(u16 *)&mxms_data(dev)[6];
+}
+
+static bool
+mxms_checksum(struct drm_device *dev)
+{
+       u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
+       u8 *mxms = mxms_data(dev), sum = 0;
+       while (size--)
+               sum += *mxms++;
+       if (sum) {
+               MXM_DBG(dev, "checksum invalid\n");
+               return false;
+       }
+       return true;
+}
+
+static bool
+mxms_valid(struct drm_device *dev)
+{
+       u8 *mxms = mxms_data(dev);
+       if (*(u32 *)mxms != 0x5f4d584d) {
+               MXM_DBG(dev, "signature invalid\n");
+               return false;
+       }
+
+       if (!mxms_version(dev) || !mxms_checksum(dev))
+               return false;
+
+       return true;
+}
+
+static bool
+mxms_foreach(struct drm_device *dev, u8 types,
+            bool (*exec)(struct drm_device *, u8 *, void *), void *info)
+{
+       u8 *mxms = mxms_data(dev);
+       u8 *desc = mxms + mxms_headerlen(dev);
+       u8 *fini = desc + mxms_structlen(dev) - 1;
+       while (desc < fini) {
+               u8 type = desc[0] & 0x0f;
+               u8 headerlen = 0;
+               u8 recordlen = 0;
+               u8 entries = 0;
+
+               switch (type) {
+               case 0: /* Output Device Structure */
+                       if (mxms_version(dev) >= 0x0300)
+                               headerlen = 8;
+                       else
+                               headerlen = 6;
+                       break;
+               case 1: /* System Cooling Capability Structure */
+               case 2: /* Thermal Structure */
+               case 3: /* Input Power Structure */
+                       headerlen = 4;
+                       break;
+               case 4: /* GPIO Device Structure */
+                       headerlen = 4;
+                       recordlen = 2;
+                       entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
+                       break;
+               case 5: /* Vendor Specific Structure */
+                       headerlen = 8;
+                       break;
+               case 6: /* Backlight Control Structure */
+                       if (mxms_version(dev) >= 0x0300) {
+                               headerlen = 4;
+                               recordlen = 8;
+                               entries   = (desc[1] & 0xf0) >> 4;
+                       } else {
+                               headerlen = 8;
+                       }
+                       break;
+               case 7: /* Fan Control Structure */
+                       headerlen = 8;
+                       recordlen = 4;
+                       entries   = desc[1] & 0x07;
+                       break;
+               default:
+                       MXM_DBG(dev, "unknown descriptor type %d\n", type);
+                       return false;
+               }
+
+               if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
+                       static const char * mxms_desc_name[] = {
+                               "ODS", "SCCS", "TS", "IPS",
+                               "GSD", "VSS", "BCS", "FCS",
+                       };
+                       u8 *dump = desc;
+                       int i, j;
+
+                       MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
+                       for (j = headerlen - 1; j >= 0; j--)
+                               printk("%02x", dump[j]);
+                       printk("\n");
+                       dump += headerlen;
+
+                       for (i = 0; i < entries; i++, dump += recordlen) {
+                               MXM_DBG(dev, "      ");
+                               for (j = recordlen - 1; j >= 0; j--)
+                                       printk("%02x", dump[j]);
+                               printk("\n");
+                       }
+               }
+
+               if (types & (1 << type)) {
+                       if (!exec(dev, desc, info))
+                               return false;
+               }
+
+               desc += headerlen + (entries * recordlen);
+       }
+
+       return true;
+}
+
+static u8 *
+mxm_table(struct drm_device *dev, u8 *size)
+{
+       struct bit_entry x;
+
+       if (bit_table(dev, 'x', &x)) {
+               MXM_DBG(dev, "BIT 'x' table not present\n");
+               return NULL;
+       }
+
+       if (x.version != 1 || x.length < 3) {
+               MXM_MSG(dev, "BIT x table %d/%d unknown\n",
+                       x.version, x.length);
+               return NULL;
+       }
+
+       *size = x.length;
+       return x.data;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+       0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+       0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+       0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv96_sor_map[16] = {
+       0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
+       0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+       0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8
+mxm_sor_map(struct drm_device *dev, u8 conn)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u8 len, *mxm = mxm_table(dev, &len);
+       if (mxm && len >= 6) {
+               u8 *map = ROMPTR(dev, mxm[4]);
+               if (map) {
+                       if (map[0] == 0x10) {
+                               if (conn < map[3])
+                                       return map[map[1] + conn];
+                               return 0x00;
+                       }
+
+                       MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
+               }
+       }
+
+       if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
+               return nv84_sor_map[conn];
+       if (dev_priv->chipset == 0x92)
+               return nv92_sor_map[conn];
+       if (dev_priv->chipset == 0x94)
+               return nv94_sor_map[conn];
+       if (dev_priv->chipset == 0x96)
+               return nv96_sor_map[conn];
+       if (dev_priv->chipset == 0x98)
+               return nv98_sor_map[conn];
+
+       MXM_MSG(dev, "missing sor map\n");
+       return 0x00;
+}
+
+static u8
+mxm_ddc_map(struct drm_device *dev, u8 port)
+{
+       u8 len, *mxm = mxm_table(dev, &len);
+       if (mxm && len >= 8) {
+               u8 *map = ROMPTR(dev, mxm[6]);
+               if (map) {
+                       if (map[0] == 0x10) {
+                               if (port < map[3])
+                                       return map[map[1] + port];
+                               return 0x00;
+                       }
+
+                       MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
+               }
+       }
+
+       /* v2.x: directly write port as dcb i2cidx */
+       return (port << 4) | port;
+}
+
+struct mxms_odev {
+       u8 outp_type;
+       u8 conn_type;
+       u8 ddc_port;
+       u8 dig_conn;
+};
+
+static void
+mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
+{
+       u64 data = ROM32(pdata[0]);
+       if (mxms_version(dev) >= 0x0300)
+               data |= (u64)ROM16(pdata[4]) << 32;
+
+       desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+       desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
+       desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+       desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
+}
+
+struct context {
+       u32 *outp;
+       struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
+{
+       struct context *ctx = info;
+       struct mxms_odev desc;
+
+       mxms_output_device(dev, data, &desc);
+       if (desc.outp_type == 2 &&
+           desc.dig_conn == ctx->desc.dig_conn)
+               return false;
+       return true;
+}
+
+static bool
+mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
+{
+       struct context *ctx = info;
+       u64 desc = *(u64 *)data;
+
+       mxms_output_device(dev, data, &ctx->desc);
+
+       /* match dcb encoder type to mxm-ods device type */
+       if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+               return true;
+
+       /* digital output, have some extra stuff to match here, there's a
+        * table in the vbios that provides a mapping from the mxm digital
+        * connection enum values to SOR/link
+        */
+       if ((desc & 0x00000000000000f0) >= 0x20) {
+               /* check against sor index */
+               u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
+               if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+                       return true;
+
+               /* check dcb entry has a compatible link field */
+               link = (link & 0x30) >> 4;
+               if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+                       return true;
+       }
+
+       /* mark this descriptor accounted for by setting invalid device type,
+        * except of course some manufactures don't follow specs properly and
+        * we need to avoid killing off the TMDS function on DP connectors
+        * if MXM-SIS is missing an entry for it.
+        */
+       data[0] &= ~0xf0;
+       if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+           mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
+               data[0] |= 0x20; /* modify descriptor to match TMDS now */
+       } else {
+               data[0] |= 0xf0;
+       }
+
+       return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
+{
+       struct context ctx = { .outp = (u32 *)dcbe };
+       u8 type, i2cidx, link;
+       u8 *conn;
+
+       /* look for an output device structure that matches this dcb entry.
+        * if one isn't found, disable it.
+        */
+       if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
+               MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
+                       idx, ctx.outp[0], ctx.outp[1]);
+               ctx.outp[0] |= 0x0000000f;
+               return 0;
+       }
+
+       /* modify the output's ddc/aux port, there's a pointer to a table
+        * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+        * vbios mxm table
+        */
+       i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
+       if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
+               i2cidx = (i2cidx & 0x0f) << 4;
+       else
+               i2cidx = (i2cidx & 0xf0);
+
+       if (i2cidx != 0xf0) {
+               ctx.outp[0] &= ~0x000000f0;
+               ctx.outp[0] |= i2cidx;
+       }
+
+       /* override dcb sorconf.link, based on what mxm data says */
+       switch (ctx.desc.outp_type) {
+       case 0x00: /* Analog CRT */
+       case 0x01: /* Analog TV/HDTV */
+               break;
+       default:
+               link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
+               ctx.outp[1] &= ~0x00000030;
+               ctx.outp[1] |= link;
+               break;
+       }
+
+       /* we may need to fixup various other vbios tables based on what
+        * the descriptor says the connector type should be.
+        *
+        * in a lot of cases, the vbios tables will claim DVI-I is possible,
+        * and the mxm data says the connector is really HDMI.  another
+        * common example is DP->eDP.
+        */
+       conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
+       type = conn[0];
+       switch (ctx.desc.conn_type) {
+       case 0x01: /* LVDS */
+               ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+               /* XXX: modify default link width in LVDS table */
+               break;
+       case 0x02: /* HDMI */
+               type = DCB_CONNECTOR_HDMI_1;
+               break;
+       case 0x03: /* DVI-D */
+               type = DCB_CONNECTOR_DVI_D;
+               break;
+       case 0x0e: /* eDP, falls through to DPint */
+               ctx.outp[1] |= 0x00010000;
+       case 0x07: /* DP internal, wtf is this?? HP8670w */
+               ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+               type = DCB_CONNECTOR_eDP;
+               break;
+       default:
+               break;
+       }
+
+       if (mxms_version(dev) >= 0x0300)
+               conn[0] = type;
+
+       return 0;
+}
+
+static bool
+mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
+{
+       u64 desc = *(u64 *)data;
+       if ((desc & 0xf0) != 0xf0)
+               MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
+       return true;
+}
+
+static void
+mxm_dcb_sanitise(struct drm_device *dev)
+{
+       u8 *dcb = dcb_table(dev);
+       if (!dcb || dcb[0] != 0x40) {
+               MXM_DBG(dev, "unsupported DCB version\n");
+               return;
+       }
+
+       dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
+       mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
+}
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
+                    u8 offset, u8 size, u8 *data)
+{
+       struct i2c_msg msgs[] = {
+               { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+               { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+       };
+
+       return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_i2c_chan *i2c = NULL;
+       u8 i2cidx, mxms[6], addr, size;
+
+       i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
+       if (i2cidx < 0x0f)
+               i2c = nouveau_i2c_find(dev, i2cidx);
+       if (!i2c)
+               return false;
+
+       addr = 0x54;
+       if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
+               addr = 0x56;
+               if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
+                       return false;
+       }
+
+       dev_priv->mxms = mxms;
+       size = mxms_headerlen(dev) + mxms_structlen(dev);
+       dev_priv->mxms = kmalloc(size, GFP_KERNEL);
+
+       if (dev_priv->mxms &&
+           mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
+               return true;
+
+       kfree(dev_priv->mxms);
+       dev_priv->mxms = NULL;
+       return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       static char muid[] = {
+               0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+               0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+       };
+       u32 mxms_args[] = { 0x00000000 };
+       union acpi_object args[4] = {
+               /* _DSM MUID */
+               { .buffer.type = 3,
+                 .buffer.length = sizeof(muid),
+                 .buffer.pointer = muid,
+               },
+               /* spec says this can be zero to mean "highest revision", but
+                * of course there's at least one bios out there which fails
+                * unless you pass in exactly the version it supports..
+                */
+               { .integer.type = ACPI_TYPE_INTEGER,
+                 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+               },
+               /* MXMS function */
+               { .integer.type = ACPI_TYPE_INTEGER,
+                 .integer.value = 0x00000010,
+               },
+               /* Pointer to MXMS arguments */
+               { .buffer.type = ACPI_TYPE_BUFFER,
+                 .buffer.length = sizeof(mxms_args),
+                 .buffer.pointer = (char *)mxms_args,
+               },
+       };
+       struct acpi_object_list list = { ARRAY_SIZE(args), args };
+       struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_handle handle;
+       int ret;
+
+       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       if (!handle)
+               return false;
+
+       ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+       if (ret) {
+               MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
+               return false;
+       }
+
+       obj = retn.pointer;
+       if (obj->type == ACPI_TYPE_BUFFER) {
+               dev_priv->mxms = kmemdup(obj->buffer.pointer,
+                                        obj->buffer.length, GFP_KERNEL);
+       } else
+       if (obj->type == ACPI_TYPE_INTEGER) {
+               MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+       }
+
+       kfree(obj);
+       return dev_priv->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static bool
+mxm_shadow_wmi(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+       struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+       struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_status status;
+
+       if (!wmi_has_guid(WMI_WMMX_GUID))
+               return false;
+
+       status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+       if (ACPI_FAILURE(status)) {
+               MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
+               return false;
+       }
+
+       obj = retn.pointer;
+       if (obj->type == ACPI_TYPE_BUFFER) {
+               dev_priv->mxms = kmemdup(obj->buffer.pointer,
+                                        obj->buffer.length, GFP_KERNEL);
+       }
+
+       kfree(obj);
+       return dev_priv->mxms != NULL;
+}
+#endif
+
+struct mxm_shadow_h {
+       const char *name;
+       bool (*exec)(struct drm_device *, u8 version);
+} _mxm_shadow[] = {
+       { "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+       { "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+       { "WMI", mxm_shadow_wmi },
+#endif
+       {}
+};
+
+static int
+mxm_shadow(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct mxm_shadow_h *shadow = _mxm_shadow;
+       do {
+               MXM_DBG(dev, "checking %s\n", shadow->name);
+               if (shadow->exec(dev, version)) {
+                       if (mxms_valid(dev))
+                               return 0;
+                       kfree(dev_priv->mxms);
+                       dev_priv->mxms = NULL;
+               }
+       } while ((++shadow)->name);
+       return -ENOENT;
+}
+
+int
+nouveau_mxm_init(struct drm_device *dev)
+{
+       u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
+       if (!mxm || !mxm[0]) {
+               MXM_MSG(dev, "no VBIOS data, nothing to do\n");
+               return 0;
+       }
+
+       MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
+
+       if (mxm_shadow(dev, mxm[0])) {
+               MXM_MSG(dev, "failed to locate valid SIS\n");
+               return -EINVAL;
+       }
+
+       MXM_MSG(dev, "MXMS Version %d.%d\n",
+               mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
+       mxms_foreach(dev, 0, NULL, NULL);
+
+       if (nouveau_mxmdcb)
+               mxm_dcb_sanitise(dev);
+       return 0;
+}
+
+void
+nouveau_mxm_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       kfree(dev_priv->mxms);
+       dev_priv->mxms = NULL;
+}
index 6abdbe6530a7b31254879ebed445b23cb2b1636c..2ef883c4bbc182cc85d1db2326a5c0d7cc3a6b64 100644 (file)
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *nobj = NULL;
        struct drm_mm_node *mem;
-       uint32_t offset;
+       uint64_t offset;
        int target, ret;
 
        mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
index 960c0ae0c0c3de650dac30beaec208b9bfd3558d..cc419fae794b19d53b6b0a0fbf6e4aa226cfc3a8 100644 (file)
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
        nv_wo32(chan->ramin, 0x020c, 0x000000ff);
 
        /* map display semaphore buffers into channel's vm */
-       if (dev_priv->card_type >= NV_D0)
-               return 0;
-
-       for (i = 0; i < 2; i++) {
-               struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
-
-               ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
-                                        &chan->dispc_vma[i]);
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct nouveau_bo *bo;
+               if (dev_priv->card_type >= NV_D0)
+                       bo = nvd0_display_crtc_sema(dev, i);
+               else
+                       bo = nv50_display(dev)->crtc[i].sem.bo;
+
+               ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
                if (ret)
                        return ret;
        }
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
+       if (dev_priv->card_type >= NV_D0) {
+               for (i = 0; i < dev->mode_config.num_crtc; i++) {
+                       struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+                       nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
+               }
+       } else
+       if (dev_priv->card_type >= NV_50) {
                struct nv50_display *disp = nv50_display(dev);
-
                for (i = 0; i < dev->mode_config.num_crtc; i++) {
                        struct nv50_display_crtc *dispc = &disp->crtc[i];
                        nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
index 33d03fbf00df56c7523bf8909168d267f0a1d197..58f497343cecb77d31e7156cdbb8aeebebfd6846 100644 (file)
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
                return;
        }
 
-       perf = ROMPTR(bios, bmp[0x73]);
+       perf = ROMPTR(dev, bmp[0x73]);
        if (!perf) {
                NV_DEBUG(dev, "No memclock table pointer found.\n");
                return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
         * ramcfg to select the correct subentry
         */
        if (P->version == 2) {
-               u8 *tmap = ROMPTR(bios, P->data[4]);
+               u8 *tmap = ROMPTR(dev, P->data[4]);
                if (!tmap) {
                        NV_DEBUG(dev, "no timing map pointer\n");
                        return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
                     struct nouveau_pm_level *perflvl)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
        u8 *vmap;
        int id;
 
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
                return;
        }
 
-       vmap = ROMPTR(bios, P->data[32]);
+       vmap = ROMPTR(dev, P->data[32]);
        if (!vmap) {
                NV_DEBUG(dev, "volt map table pointer invalid\n");
                return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
                        return;
                }
 
-               perf = ROMPTR(bios, P.data[0]);
+               perf = ROMPTR(dev, P.data[0]);
                version   = perf[0];
                headerlen = perf[1];
                if (version < 0x40) {
                        recordlen = perf[3] + (perf[4] * perf[5]);
                        entries   = perf[2];
+
+                       pm->pwm_divisor = ROM16(perf[6]);
                } else {
                        recordlen = perf[2] + (perf[3] * perf[4]);
                        entries   = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
                        return;
                }
 
-               perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+               perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
                if (!perf) {
                        NV_DEBUG(dev, "perf table pointer invalid\n");
                        return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
                                perflvl->memory = ROM16(entry[11]) * 1000;
                        else
                                perflvl->memory = ROM16(entry[11]) * 2000;
-
                        break;
                case 0x25:
                        perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
                        perflvl->core = ROM16(entry[8]) * 1000;
                        perflvl->shader = ROM16(entry[10]) * 1000;
                        perflvl->memory = ROM16(entry[12]) * 1000;
-                       /*XXX: confirm on 0x35 */
-                       perflvl->unk05 = ROM16(entry[16]) * 1000;
+                       perflvl->vdec = ROM16(entry[16]) * 1000;
+                       perflvl->dom6 = ROM16(entry[20]) * 1000;
                        break;
                case 0x40:
 #define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
index a539fd25792114cf0c4b7129d56152a890d0785d..9064d7f197941c77c32a8a6dde4dfc80c2a0c097 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_gpio.h"
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #include <linux/hwmon-sysfs.h>
 
 static int
-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                    u8 id, u32 khz)
+nouveau_pwmfan_get(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-       void *pre_state;
+       struct gpio_func gpio;
+       u32 divs, duty;
+       int ret;
 
-       if (khz == 0)
-               return 0;
+       if (!pm->pwm_get)
+               return -ENODEV;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+       if (ret == 0) {
+               ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
+               if (ret == 0) {
+                       divs = max(divs, duty);
+                       if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+                               duty = divs - duty;
+                       return (duty * 100) / divs;
+               }
+
+               return nouveau_gpio_func_get(dev, gpio.func) * 100;
+       }
+
+       return -ENODEV;
+}
+
+static int
+nouveau_pwmfan_set(struct drm_device *dev, int percent)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct gpio_func gpio;
+       u32 divs, duty;
+       int ret;
+
+       if (!pm->pwm_set)
+               return -ENODEV;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+       if (ret == 0) {
+               divs = pm->pwm_divisor;
+               if (pm->fan.pwm_freq) {
+                       /*XXX: PNVIO clock more than likely... */
+                       divs = 135000 / pm->fan.pwm_freq;
+                       if (dev_priv->chipset < 0xa3)
+                               divs /= 4;
+               }
+
+               duty = ((divs * percent) + 99) / 100;
+               if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+                       duty = divs - duty;
 
-       pre_state = pm->clock_pre(dev, perflvl, id, khz);
-       if (IS_ERR(pre_state))
-               return PTR_ERR(pre_state);
+               return pm->pwm_set(dev, gpio.line, divs, duty);
+       }
+
+       return -ENODEV;
+}
+
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                      struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret;
+
+       /*XXX: not on all boards, we should control based on temperature
+        *     on recent boards..  or maybe on some other factor we don't
+        *     know about?
+        */
+       if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+               ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+               if (ret && ret != -ENODEV) {
+                       NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       if (pm->voltage.supported && pm->voltage_set) {
+               if (perflvl->volt_min && b->volt_min > a->volt_min) {
+                       ret = pm->voltage_set(dev, perflvl->volt_min);
+                       if (ret) {
+                               NV_ERROR(dev, "voltage set failed: %d\n", ret);
+                               return ret;
+                       }
+               }
+       }
 
-       if (pre_state)
-               pm->clock_set(dev, pre_state);
        return 0;
 }
 
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       void *state;
        int ret;
 
        if (perflvl == pm->cur)
                return 0;
 
-       if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
-               ret = pm->voltage_set(dev, perflvl->volt_min);
-               if (ret) {
-                       NV_ERROR(dev, "voltage_set %d failed: %d\n",
-                                perflvl->volt_min, ret);
-               }
-       }
+       ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+       if (ret)
+               return ret;
 
-       if (pm->clocks_pre) {
-               void *state = pm->clocks_pre(dev, perflvl);
-               if (IS_ERR(state))
-                       return PTR_ERR(state);
-               pm->clocks_set(dev, state);
-       } else
-       if (pm->clock_set) {
-               nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
-               nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
-               nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
-               nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
-       }
+       state = pm->clocks_pre(dev, perflvl);
+       if (IS_ERR(state))
+               return PTR_ERR(state);
+       pm->clocks_set(dev, state);
+
+       ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+       if (ret)
+               return ret;
 
        pm->cur = perflvl;
        return 0;
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 
        memset(perflvl, 0, sizeof(*perflvl));
 
-       if (pm->clocks_get) {
-               ret = pm->clocks_get(dev, perflvl);
-               if (ret)
-                       return ret;
-       } else
-       if (pm->clock_get) {
-               ret = pm->clock_get(dev, PLL_CORE);
-               if (ret > 0)
-                       perflvl->core = ret;
-
-               ret = pm->clock_get(dev, PLL_MEMORY);
-               if (ret > 0)
-                       perflvl->memory = ret;
-
-               ret = pm->clock_get(dev, PLL_SHADER);
-               if (ret > 0)
-                       perflvl->shader = ret;
-
-               ret = pm->clock_get(dev, PLL_UNK05);
-               if (ret > 0)
-                       perflvl->unk05 = ret;
-       }
+       ret = pm->clocks_get(dev, perflvl);
+       if (ret)
+               return ret;
 
        if (pm->voltage.supported && pm->voltage_get) {
                ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
                }
        }
 
+       ret = nouveau_pwmfan_get(dev);
+       if (ret > 0)
+               perflvl->fanspeed = ret;
+
        return 0;
 }
 
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
                                                nouveau_hwmon_show_update_rate,
                                                NULL, 0);
 
+static ssize_t
+nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+                             char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct gpio_func gpio;
+       u32 cycles, cur, prev;
+       u64 start;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
+       if (ret)
+               return ret;
+
+       /* Monitor the GPIO input 0x3b for 250ms.
+        * When the fan spins, it changes the value of GPIO FAN_SENSE.
+        * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
+        */
+       start = ptimer->read(dev);
+       prev = nouveau_gpio_sense(dev, 0, gpio.line);
+       cycles = 0;
+       do {
+               cur = nouveau_gpio_sense(dev, 0, gpio.line);
+               if (prev != cur) {
+                       cycles++;
+                       prev = cur;
+               }
+
+               usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+       } while (ptimer->read(dev) - start < 250000000);
+
+       /* interpolate to get rpm */
+       return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+                         NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       int ret;
+
+       ret = nouveau_pwmfan_get(dev);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+                      const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret = -ENODEV;
+       long value;
+
+       if (nouveau_perflvl_wr != 7777)
+               return -EPERM;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < pm->fan.min_duty)
+               value = pm->fan.min_duty;
+       if (value > pm->fan.max_duty)
+               value = pm->fan.max_duty;
+
+       ret = nouveau_pwmfan_set(dev, value);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0,
+                         nouveau_hwmon_set_pwm0, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_min(struct device *d,
+                          struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       return sprintf(buf, "%i\n", pm->fan.min_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+                          const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < 0)
+               value = 0;
+
+       if (pm->fan.max_duty - value < 10)
+               value = pm->fan.max_duty - 10;
+
+       if (value < 10)
+               pm->fan.min_duty = 10;
+       else
+               pm->fan.min_duty = value;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0_min,
+                         nouveau_hwmon_set_pwm0_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_max(struct device *d,
+                          struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       return sprintf(buf, "%i\n", pm->fan.max_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+                          const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < 0)
+               value = 0;
+
+       if (value - pm->fan.min_duty < 10)
+               value = pm->fan.min_duty + 10;
+
+       if (value > 100)
+               pm->fan.max_duty = 100;
+       else
+               pm->fan.max_duty = value;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0_max,
+                         nouveau_hwmon_set_pwm0_max, 0);
+
 static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_update_rate.dev_attr.attr,
        NULL
 };
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+       &sensor_dev_attr_fan0_input.dev_attr.attr,
+       NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+       &sensor_dev_attr_pwm0.dev_attr.attr,
+       &sensor_dev_attr_pwm0_min.dev_attr.attr,
+       &sensor_dev_attr_pwm0_max.dev_attr.attr,
+       NULL
+};
 
 static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
 };
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+       .attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+       .attrs = hwmon_pwm_fan_attributes,
+};
 #endif
 
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
        struct device *hwmon_dev;
-       int ret;
+       int ret = 0;
 
        if (!pm->temp_get)
                return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
                return ret;
        }
        dev_set_drvdata(hwmon_dev, dev);
+
+       /* default sysfs entries */
        ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
        if (ret) {
-               NV_ERROR(dev,
-                       "Unable to create hwmon sysfs file: %d\n", ret);
-               hwmon_device_unregister(hwmon_dev);
-               return ret;
+               if (ret)
+                       goto error;
+       }
+
+       /* if the card has a pwm fan */
+       /*XXX: incorrect, need better detection for this, some boards have
+        *     the gpio entries for pwm fan control even when there's no
+        *     actual fan connected to it... therm table? */
+       if (nouveau_pwmfan_get(dev) >= 0) {
+               ret = sysfs_create_group(&dev->pdev->dev.kobj,
+                                        &hwmon_pwm_fan_attrgroup);
+               if (ret)
+                       goto error;
+       }
+
+       /* if the card can read the fan rpm */
+       if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+               ret = sysfs_create_group(&dev->pdev->dev.kobj,
+                                        &hwmon_fan_rpm_attrgroup);
+               if (ret)
+                       goto error;
        }
 
        pm->hwmon = hwmon_dev;
-#endif
+
+       return 0;
+
+error:
+       NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+       hwmon_device_unregister(hwmon_dev);
+       pm->hwmon = NULL;
+       return ret;
+#else
+       pm->hwmon = NULL;
        return 0;
+#endif
 }
 
 static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
 
        if (pm->hwmon) {
                sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+               sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
+               sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+
                hwmon_device_unregister(pm->hwmon);
        }
 #endif
index 8ac02cdd03a1039067c444c811fcfeecb0ab5554..2f8e14fbcff85237cfc4120d19355f1a01793225 100644 (file)
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
 void nouveau_mem_timing_fini(struct drm_device *);
 
 /* nv04_pm.c */
-int nv04_pm_clock_get(struct drm_device *, u32 id);
-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-                       u32 id, int khz);
-void nv04_pm_clock_set(struct drm_device *, void *);
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
 
 /* nv40_pm.c */
 int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
 
 /* nv50_pm.c */
-int nv50_pm_clock_get(struct drm_device *, u32 id);
-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-                       u32 id, int khz);
-void nv50_pm_clock_set(struct drm_device *, void *);
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
 
 /* nva3_pm.c */
 int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nva3_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
 
 /* nvc0_pm.c */
 int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
 
 /* nouveau_temp.c */
 void nouveau_temp_init(struct drm_device *dev);
index c8a463b76c89f03f96f68e48107d9993d2a2bda1..47f245edf538f10534824a034a36a4b3bcd85aaa 100644 (file)
@@ -8,91 +8,30 @@
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-       struct ttm_backend backend;
+       /* this has to be the first field so populate/unpopulated in
+        * nouve_bo.c works properly, otherwise have to move them here
+        */
+       struct ttm_dma_tt ttm;
        struct drm_device *dev;
-
-       dma_addr_t *pages;
-       unsigned nr_pages;
-       bool unmap_pages;
-
        u64 offset;
-       bool bound;
 };
 
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-                      struct page **pages, struct page *dummy_read_page,
-                      dma_addr_t *dma_addrs)
-{
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct drm_device *dev = nvbe->dev;
-       int i;
-
-       NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-       nvbe->pages = dma_addrs;
-       nvbe->nr_pages = num_pages;
-       nvbe->unmap_pages = true;
-
-       /* this code path isn't called and is incorrect anyways */
-       if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-               nvbe->unmap_pages = false;
-               return 0;
-       }
-
-       for (i = 0; i < num_pages; i++) {
-               nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-                       nvbe->nr_pages = --i;
-                       be->func->clear(be);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct drm_device *dev = nvbe->dev;
-
-       if (nvbe->bound)
-               be->func->unbind(be);
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-       if (nvbe->unmap_pages) {
-               while (nvbe->nr_pages--) {
-                       pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
-                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               }
-               nvbe->unmap_pages = false;
-       }
-
-       nvbe->pages = NULL;
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
-       if (be) {
+       if (ttm) {
                NV_DEBUG(nvbe->dev, "\n");
-
-               if (nvbe) {
-                       if (nvbe->pages)
-                               be->func->clear(be);
-                       kfree(nvbe);
-               }
+               ttm_dma_tt_fini(&nvbe->ttm);
+               kfree(nvbe);
        }
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-       for (i = 0; i < nvbe->nr_pages; i++) {
-               dma_addr_t dma_offset = nvbe->pages[i];
+       for (i = 0; i < ttm->num_pages; i++) {
+               dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
                uint32_t offset_l = lower_32_bits(dma_offset);
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
                }
        }
 
-       nvbe->bound = true;
        return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
        NV_DEBUG(dev, "\n");
 
-       if (!nvbe->bound)
+       if (ttm->state != tt_bound)
                return 0;
 
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-       for (i = 0; i < nvbe->nr_pages; i++) {
+       for (i = 0; i < ttm->num_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }
 
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv04_sgdma_bind,
        .unbind                 = nv04_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = nvbe->pages;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        nvbe->offset = mem->start << PAGE_SHIFT;
 
@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        }
 
        nv41_sgdma_flush(nvbe);
-       nvbe->bound = true;
        return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
        }
 
        nv41_sgdma_flush(nvbe);
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv41_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv41_sgdma_bind,
        .unbind                 = nv41_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
 
-       nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+       nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = nvbe->pages;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2, tmp[4];
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
        int i;
 
        nvbe->offset = mem->start << PAGE_SHIFT;
@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);
 
-       nv44_sgdma_flush(nvbe);
-       nvbe->bound = true;
+       nv44_sgdma_flush(ttm);
        return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-       nv44_sgdma_flush(nvbe);
-       nvbe->bound = false;
+       nv44_sgdma_flush(ttm);
        return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv44_sgdma_bind,
        .unbind                 = nv44_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;
+
        /* noop: bound in move_notify() */
-       node->pages = nvbe->pages;
-       nvbe->pages = (dma_addr_t *)node;
-       nvbe->bound = true;
+       node->pages = nvbe->ttm.dma_address;
        return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
        /* noop: unbound in move_notify() */
-       nvbe->pages = node->pages;
-       node->pages = NULL;
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv50_sgdma_bind,
        .unbind                 = nv50_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+                        unsigned long size, uint32_t page_flags,
+                        struct page *dummy_read_page)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+       struct drm_device *dev = dev_priv->dev;
        struct nouveau_sgdma_be *nvbe;
 
        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
                return NULL;
 
        nvbe->dev = dev;
+       nvbe->ttm.ttm.func = dev_priv->gart_info.func;
 
-       nvbe->backend.func = dev_priv->gart_info.func;
-       return &nvbe->backend;
+       if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(nvbe);
+               return NULL;
+       }
+       return &nvbe->ttm.ttm;
 }
 
 int
index d8831ab42bb90344a28b8a146624aa4aca69d8c5..f5e98910d17faf6b80bcd64f49d9c8909896b6a7 100644 (file)
@@ -36,6 +36,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_ramht.h"
+#include "nouveau_gpio.h"
 #include "nouveau_pm.h"
 #include "nv50_display.h"
 
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = NULL;
-               engine->gpio.set                = NULL;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                engine->vram.init               = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.init               = nv10_gpio_init;
+               engine->gpio.fini               = nv10_gpio_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->gpio.irq_enable         = nv10_gpio_irq_enable;
                engine->pm.clocks_get           = nv40_pm_clocks_get;
                engine->pm.clocks_pre           = nv40_pm_clocks_pre;
                engine->pm.clocks_set           = nv40_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                engine->pm.temp_get             = nv40_temp_get;
+               engine->pm.pwm_get              = nv40_pm_pwm_get;
+               engine->pm.pwm_set              = nv40_pm_pwm_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv50_display_early_init;
                engine->display.late_takedown   = nv50_display_late_takedown;
                engine->display.create          = nv50_display_create;
-               engine->display.init            = nv50_display_init;
                engine->display.destroy         = nv50_display_destroy;
+               engine->display.init            = nv50_display_init;
+               engine->display.fini            = nv50_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nv50_gpio_fini;
-               engine->gpio.get                = nv50_gpio_get;
-               engine->gpio.set                = nv50_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nv50_gpio_drive;
+               engine->gpio.sense              = nv50_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                switch (dev_priv->chipset) {
                case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                case 0xaa:
                case 0xac:
                case 0x50:
-                       engine->pm.clock_get    = nv50_pm_clock_get;
-                       engine->pm.clock_pre    = nv50_pm_clock_pre;
-                       engine->pm.clock_set    = nv50_pm_clock_set;
+                       engine->pm.clocks_get   = nv50_pm_clocks_get;
+                       engine->pm.clocks_pre   = nv50_pm_clocks_pre;
+                       engine->pm.clocks_set   = nv50_pm_clocks_set;
                        break;
                default:
                        engine->pm.clocks_get   = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                        engine->pm.temp_get     = nv84_temp_get;
                else
                        engine->pm.temp_get     = nv40_temp_get;
+               engine->pm.pwm_get              = nv50_pm_pwm_get;
+               engine->pm.pwm_set              = nv50_pm_pwm_set;
                engine->vram.init               = nv50_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
                engine->vram.get                = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv50_display_early_init;
                engine->display.late_takedown   = nv50_display_late_takedown;
                engine->display.create          = nv50_display_create;
-               engine->display.init            = nv50_display_init;
                engine->display.destroy         = nv50_display_destroy;
+               engine->display.init            = nv50_display_init;
+               engine->display.fini            = nv50_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv50_gpio_get;
-               engine->gpio.set                = nv50_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nv50_gpio_drive;
+               engine->gpio.sense              = nv50_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                engine->vram.init               = nvc0_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nvc0_vram_flags_valid;
                engine->pm.temp_get             = nv84_temp_get;
                engine->pm.clocks_get           = nvc0_pm_clocks_get;
+               engine->pm.clocks_pre           = nvc0_pm_clocks_pre;
+               engine->pm.clocks_set           = nvc0_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
+               engine->pm.pwm_get              = nv50_pm_pwm_get;
+               engine->pm.pwm_set              = nv50_pm_pwm_set;
                break;
        case 0xd0:
                engine->instmem.init            = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nouveau_stub_init;
                engine->display.late_takedown   = nouveau_stub_takedown;
                engine->display.create          = nvd0_display_create;
-               engine->display.init            = nvd0_display_init;
                engine->display.destroy         = nvd0_display_destroy;
+               engine->display.init            = nvd0_display_init;
+               engine->display.fini            = nvd0_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nvd0_gpio_get;
-               engine->gpio.set                = nvd0_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nvd0_gpio_drive;
+               engine->gpio.sense              = nvd0_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                engine->vram.init               = nvc0_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
                engine->vram.get                = nvc0_vram_new;
                engine->vram.put                = nv50_vram_del;
                engine->vram.flags_valid        = nvc0_vram_flags_valid;
+               engine->pm.temp_get             = nv84_temp_get;
                engine->pm.clocks_get           = nvc0_pm_clocks_get;
+               engine->pm.clocks_pre           = nvc0_pm_clocks_pre;
+               engine->pm.clocks_set           = nvc0_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                break;
@@ -615,7 +615,7 @@ nouveau_card_init(struct drm_device *dev)
                goto out_gart;
 
        /* PGPIO */
-       ret = engine->gpio.init(dev);
+       ret = nouveau_gpio_create(dev);
        if (ret)
                goto out_mc;
 
@@ -648,6 +648,7 @@ nouveau_card_init(struct drm_device *dev)
                        nv50_graph_create(dev);
                        break;
                case NV_C0:
+               case NV_D0:
                        nvc0_graph_create(dev);
                        break;
                default:
@@ -663,6 +664,11 @@ nouveau_card_init(struct drm_device *dev)
                case 0xa0:
                        nv84_crypt_create(dev);
                        break;
+               case 0x98:
+               case 0xaa:
+               case 0xac:
+                       nv98_crypt_create(dev);
+                       break;
                }
 
                switch (dev_priv->card_type) {
@@ -684,15 +690,25 @@ nouveau_card_init(struct drm_device *dev)
                        break;
                }
 
+               if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
+                       nv84_bsp_create(dev);
+                       nv84_vp_create(dev);
+                       nv98_ppp_create(dev);
+               } else
+               if (dev_priv->chipset >= 0x84) {
+                       nv50_mpeg_create(dev);
+                       nv84_bsp_create(dev);
+                       nv84_vp_create(dev);
+               } else
+               if (dev_priv->chipset >= 0x50) {
+                       nv50_mpeg_create(dev);
+               } else
                if (dev_priv->card_type == NV_40 ||
                    dev_priv->chipset == 0x31 ||
                    dev_priv->chipset == 0x34 ||
-                   dev_priv->chipset == 0x36)
+                   dev_priv->chipset == 0x36) {
                        nv31_mpeg_create(dev);
-               else
-               if (dev_priv->card_type == NV_50 &&
-                   (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
-                       nv50_mpeg_create(dev);
+               }
 
                for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
                        if (dev_priv->eng[e]) {
@@ -712,27 +728,7 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_fifo;
 
-       /* initialise general modesetting */
-       drm_mode_config_init(dev);
-       drm_mode_create_scaling_mode_property(dev);
-       drm_mode_create_dithering_property(dev);
-       dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
-       dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-       if (dev_priv->card_type < NV_10) {
-               dev->mode_config.max_width = 2048;
-               dev->mode_config.max_height = 2048;
-       } else
-       if (dev_priv->card_type < NV_50) {
-               dev->mode_config.max_width = 4096;
-               dev->mode_config.max_height = 4096;
-       } else {
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       }
-
-       ret = engine->display.create(dev);
+       ret = nouveau_display_create(dev);
        if (ret)
                goto out_irq;
 
@@ -752,12 +748,11 @@ nouveau_card_init(struct drm_device *dev)
        }
 
        if (dev->mode_config.num_crtc) {
-               ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+               ret = nouveau_display_init(dev);
                if (ret)
                        goto out_chan;
 
                nouveau_fbcon_init(dev);
-               drm_kms_helper_poll_init(dev);
        }
 
        return 0;
@@ -768,7 +763,7 @@ out_fence:
        nouveau_fence_fini(dev);
 out_disp:
        nouveau_backlight_exit(dev);
-       engine->display.destroy(dev);
+       nouveau_display_destroy(dev);
 out_irq:
        nouveau_irq_fini(dev);
 out_fifo:
@@ -788,7 +783,7 @@ out_engine:
 out_timer:
        engine->timer.takedown(dev);
 out_gpio:
-       engine->gpio.takedown(dev);
+       nouveau_gpio_destroy(dev);
 out_mc:
        engine->mc.takedown(dev);
 out_gart:
@@ -818,9 +813,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
        int e;
 
        if (dev->mode_config.num_crtc) {
-               drm_kms_helper_poll_fini(dev);
                nouveau_fbcon_fini(dev);
-               drm_vblank_cleanup(dev);
+               nouveau_display_fini(dev);
        }
 
        if (dev_priv->channel) {
@@ -829,8 +823,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
        }
 
        nouveau_backlight_exit(dev);
-       engine->display.destroy(dev);
-       drm_mode_config_cleanup(dev);
+       nouveau_display_destroy(dev);
 
        if (!dev_priv->noaccel) {
                engine->fifo.takedown(dev);
@@ -843,7 +836,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
        }
        engine->fb.takedown(dev);
        engine->timer.takedown(dev);
-       engine->gpio.takedown(dev);
+       nouveau_gpio_destroy(dev);
        engine->mc.takedown(dev);
        engine->display.late_takedown(dev);
 
@@ -1110,13 +1103,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
        dev_priv->noaccel = !!nouveau_noaccel;
        if (nouveau_noaccel == -1) {
                switch (dev_priv->chipset) {
-#if 0
-               case 0xXX: /* known broken */
+               case 0xd9: /* known broken */
                        NV_INFO(dev, "acceleration disabled by default, pass "
                                     "noaccel=0 to force enable\n");
                        dev_priv->noaccel = true;
                        break;
-#endif
                default:
                        dev_priv->noaccel = false;
                        break;
@@ -1238,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
                getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
-               getparam->value = dev_priv->card_type < NV_D0;
+               getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_GRAPH_UNITS:
                /* NV40 and NV50 versions are quite different, but register
index 5a46446dd5a8682e4773b8a0ff60724ef7de3514..0f5a301605569988231365f48af9cab73a9bce2f 100644 (file)
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
        temps->down_clock = 100;
        temps->fan_boost = 90;
 
+       /* Set the default range for the pwm fan */
+       pm->fan.min_duty = 30;
+       pm->fan.max_duty = 100;
+
        /* Set the known default values to setup the temperature sensor */
        if (dev_priv->card_type >= NV_40) {
                switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
                case 0x13:
                        sensor->slope_div = value;
                        break;
+               case 0x22:
+                       pm->fan.min_duty = value & 0xff;
+                       pm->fan.max_duty = (value & 0xff00) >> 8;
+                       break;
+               case 0x26:
+                       pm->fan.pwm_freq = value;
+                       break;
                }
                temp += recordlen;
        }
 
        nouveau_temp_safety_checks(dev);
+
+       /* check the fan min/max settings */
+       if (pm->fan.min_duty < 10)
+               pm->fan.min_duty = 10;
+       if (pm->fan.max_duty > 100)
+               pm->fan.max_duty = 100;
+       if (pm->fan.max_duty < pm->fan.min_duty)
+               pm->fan.max_duty = pm->fan.min_duty;
 }
 
 static int
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
 static void
 nouveau_temp_probe_i2c(struct drm_device *dev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &dev_priv->vbios.dcb;
        struct i2c_board_info info[] = {
                { I2C_BOARD_INFO("w83l785ts", 0x2d) },
                { I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
                { I2C_BOARD_INFO("lm99", 0x4c) },
                { }
        };
-       int idx = (dcb->version >= 0x40 ?
-                  dcb->i2c_default_indices & 0xf : 2);
 
        nouveau_i2c_identify(dev, "monitoring device", info,
-                            probe_monitoring_device, idx);
+                            probe_monitoring_device, NV_I2C_DEFAULT(0));
 }
 
 void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       temp = ROMPTR(bios, P.data[12]);
+                       temp = ROMPTR(dev, P.data[12]);
                else if (P.version == 2)
-                       temp = ROMPTR(bios, P.data[16]);
+                       temp = ROMPTR(dev, P.data[16]);
                else
                        NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
 
index ef0832b29ad2e94ee84e0cde3165fca8b8a4560e..2bf6c0350b4bbd4fdd5f380f488d4acb581ab978 100644 (file)
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 
 void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
-                 struct nouveau_mem *mem, dma_addr_t *list)
+                 struct nouveau_mem *mem)
 {
        struct nouveau_vm *vm = vma->vm;
+       dma_addr_t *list = mem->pages;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
index 6ce995f7797e987d2043d9d8127f84fa83119f74..4fb6e728734d0db26b8d0c208819afe4c52fc1b2 100644 (file)
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-                      struct nouveau_mem *, dma_addr_t *);
+                      struct nouveau_mem *);
 
 /* nv50_vm.c */
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
index 86d03e15735d8143b94ac61d665ecee7f422fcc4..b010cb997b342aab62ac5ddb7f8d48573a6b4011 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_gpio.h"
 
 static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
 static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
 nouveau_voltage_gpio_get(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
        u8 vid = 0;
        int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
                if (!(volt->vid_mask & (1 << i)))
                        continue;
 
-               vid |= gpio->get(dev, vidtag[i]) << i;
+               vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
        }
 
        return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
 nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
        int vid, i;
 
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
                if (!(volt->vid_mask & (1 << i)))
                        continue;
 
-               gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+               nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
        }
 
        return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       volt = ROMPTR(bios, P.data[16]);
+                       volt = ROMPTR(dev, P.data[16]);
                else
                if (P.version == 2)
-                       volt = ROMPTR(bios, P.data[12]);
+                       volt = ROMPTR(dev, P.data[12]);
                else {
                        NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
                }
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
                }
 
-               volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+               volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
        }
 
        if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
                }
 
-               if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+               if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
                        NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
                        return;
                }
index 5e45398a9e2deed847a2191260c76611e325b3c5..728d07584d3907046f7987f189230d706b58666d 100644 (file)
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
        regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
        regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
        /* framebuffer can be larger than crtc scanout area. */
-       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
        regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
        regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
        regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 
        /* framebuffer can be larger than crtc scanout area. */
        regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
-               XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+               XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
        regp->CRTC[NV_CIO_CRE_42] =
-               XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+               XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
                                            MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
        regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
                      regp->ramdac_gen_ctrl);
 
-       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
        regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
-               XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+               XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
        regp->CRTC[NV_CIO_CRE_42] =
-               XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+               XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
 
        /* Update the framebuffer location. */
        regp->fb_start = nv_crtc->fb.offset & ~3;
-       regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+       regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
        nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
 
        /* Update the arbitration parameters. */
index e000455e06d0154000b97d6883614afa8ff5aa98..8300266ffaea2722ea5009510e70d211caf84a17 100644 (file)
@@ -32,6 +32,7 @@
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 #include "nvreg.h"
 
 int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
        uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
        uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
        }
 
-       saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
-       saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+       saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+       saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
-       gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
 
        msleep(4);
 
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
        nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
-       gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
 
        return sample;
 }
index 12098bf839c493561bf5aa5ab185a3601eeca34d..2258746016f8e82919b23d2ac0897c8bb02a9da7 100644 (file)
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_display_mode *output_mode = &nv_encoder->mode;
+       struct drm_connector *connector = &nv_connector->base;
        uint32_t mode_ratio, panel_ratio;
 
        NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
            output_mode->clock > 165000)
                regp->fp_control |= (2 << 24);
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
-               bool duallink, dummy;
+               bool duallink = false, dummy;
+               if (nv_connector->edid &&
+                   nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+                       duallink = (((u8 *)nv_connector->edid)[121] == 2);
+               } else {
+                       nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+                                                     &duallink, &dummy);
+               }
 
-               nouveau_bios_parse_lvds_table(dev, output_mode->clock,
-                                             &duallink, &dummy);
                if (duallink)
                        regp->fp_control |= (8 << 28);
        } else
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        }
 
        /* Output property. */
-       if (nv_connector->use_dithering) {
+       if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+           (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+            encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
                if (dev_priv->chipset == 0x11)
                        regp->dither = savep->dither | 0x00010000;
                else {
index 6bd8518d7b2e3be721703b2f5344cc9cc26b63db..7047d37e8dab4634d4a016de9126a140641e4ddf 100644 (file)
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
        return 0;
 }
 
+void
+nv04_display_fini(struct drm_device *dev)
+{
+}
+
 static void
 nv04_vblank_crtc0_isr(struct drm_device *dev)
 {
index 9ae92a87b8cca30b3def46f7a6d60356042d55e8..6e7589918fa93dbcf91eb7d4b27e082b3a78e86d 100644 (file)
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 
-struct nv04_pm_state {
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       int ret;
+
+       ret = nouveau_hw_get_clock(dev, PLL_CORE);
+       if (ret < 0)
+               return ret;
+       perflvl->core = ret;
+
+       ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+       if (ret < 0)
+               return ret;
+       perflvl->memory = ret;
+
+       return 0;
+}
+
+struct nv04_pm_clock {
        struct pll_lims pll;
        struct nouveau_pll_vals calc;
 };
 
-int
-nv04_pm_clock_get(struct drm_device *dev, u32 id)
+struct nv04_pm_state {
+       struct nv04_pm_clock core;
+       struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
 {
-       return nouveau_hw_get_clock(dev, id);
+       int ret;
+
+       ret = get_pll_limits(dev, id, &clk->pll);
+       if (ret)
+               return ret;
+
+       ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+       if (!ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 void *
-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                 u32 id, int khz)
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct nv04_pm_state *state;
+       struct nv04_pm_state *info;
        int ret;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
                return ERR_PTR(-ENOMEM);
 
-       ret = get_pll_limits(dev, id, &state->pll);
-       if (ret) {
-               kfree(state);
-               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
-       }
+       ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+       if (ret)
+               goto error;
 
-       ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
-       if (!ret) {
-               kfree(state);
-               return ERR_PTR(-EINVAL);
+       if (perflvl->memory) {
+               ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+               if (ret)
+                       goto error;
        }
 
-       return state;
+       return info;
+error:
+       kfree(info);
+       return ERR_PTR(ret);
 }
 
-void
-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-       struct nv04_pm_state *state = pre_state;
-       u32 reg = state->pll.reg;
+       u32 reg = clk->pll.reg;
 
        /* thank the insane nouveau_hw_setpll() interface for this */
        if (dev_priv->card_type >= NV_40)
                reg += 4;
 
-       nouveau_hw_setpll(dev, reg, &state->calc);
+       nouveau_hw_setpll(dev, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct nv04_pm_state *state = pre_state;
+
+       prog_pll(dev, &state->core);
 
-       if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
-               if (dev_priv->card_type == NV_20)
-                       nv_mask(dev, 0x1002c4, 0, 1 << 20);
+       if (state->memory.pll.reg) {
+               prog_pll(dev, &state->memory);
+               if (dev_priv->card_type < NV_30) {
+                       if (dev_priv->card_type == NV_20)
+                               nv_mask(dev, 0x1002c4, 0, 1 << 20);
 
-               /* Reset the DLLs */
-               nv_mask(dev, 0x1002c0, 0, 1 << 8);
+                       /* Reset the DLLs */
+                       nv_mask(dev, 0x1002c0, 0, 1 << 8);
+               }
        }
 
-       if (reg == NV_PRAMDAC_NVPLL_COEFF)
-               ptimer->init(dev);
+       ptimer->init(dev);
 
        kfree(state);
+       return 0;
 }
-
index 263301b809dd388d668bfed523e37a3194033033..55c945290e5214af35c92fd9c76af03ea4045fba 100644 (file)
@@ -2,6 +2,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_hw.h"
 
 int
 nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
 
        /* determine base clock for timer source */
        if (dev_priv->chipset < 0x40) {
-               n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+               n = nouveau_hw_get_clock(dev, PLL_CORE);
        } else
        if (dev_priv->chipset == 0x40) {
                /*XXX: figure this out */
index 007fc29e2f86130987ee5759894ed447f2fa1d93..550ad3fcf0afa691153f293f7e36f64e181105f7 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 
-static bool
-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
-                 uint32_t *mask)
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
 {
-       if (ent->line < 2) {
-               *reg = NV_PCRTC_GPIO;
-               *shift = ent->line * 16;
-               *mask = 0x11;
-
-       } else if (ent->line < 10) {
-               *reg = NV_PCRTC_GPIO_EXT;
-               *shift = (ent->line - 2) * 4;
-               *mask = 0x3;
+       if (line < 2) {
+               line = line * 16;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+               return !!(line & 0x0100);
+       } else
+       if (line < 10) {
+               line = (line - 2) * 4;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+               return !!(line & 0x04);
+       } else
+       if (line < 14) {
+               line = (line - 10) * 4;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+               return !!(line & 0x04);
+       }
 
-       } else if (ent->line < 14) {
-               *reg = NV_PCRTC_850;
-               *shift = (ent->line - 10) * 4;
-               *mask = 0x3;
+       return -EINVAL;
+}
 
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+       u32 reg, mask, data;
+
+       if (line < 2) {
+               line = line * 16;
+               reg  = NV_PCRTC_GPIO;
+               mask = 0x00000011;
+               data = (dir << 4) | out;
+       } else
+       if (line < 10) {
+               line = (line - 2) * 4;
+               reg  = NV_PCRTC_GPIO_EXT;
+               mask = 0x00000003 << ((line - 2) * 4);
+               data = (dir << 1) | out;
+       } else
+       if (line < 14) {
+               line = (line - 10) * 4;
+               reg  = NV_PCRTC_850;
+               mask = 0x00000003;
+               data = (dir << 1) | out;
        } else {
-               return false;
+               return -EINVAL;
        }
 
-       return true;
+       mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+       NVWriteCRTC(dev, 0, reg, mask | (data << line));
+       return 0;
 }
 
-int
-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
 {
-       struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
-       uint32_t reg, shift, mask, value;
+       u32 mask = 0x00010001 << line;
 
-       if (!ent)
-               return -ENODEV;
+       nv_wr32(dev, 0x001104, mask);
+       nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
 
-       if (!get_gpio_location(ent, &reg, &shift, &mask))
-               return -ENODEV;
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+       u32 intr = nv_rd32(dev, 0x1104);
+       u32 hi = (intr & 0x0000ffff) >> 0;
+       u32 lo = (intr & 0xffff0000) >> 16;
 
-       value = NVReadCRTC(dev, 0, reg) >> shift;
+       nouveau_gpio_isr(dev, 0, hi | lo);
 
-       return (ent->invert ? 1 : 0) ^ (value & 1);
+       nv_wr32(dev, 0x001104, intr);
 }
 
 int
-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_init(struct drm_device *dev)
 {
-       struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
-       uint32_t reg, shift, mask, value;
-
-       if (!ent)
-               return -ENODEV;
-
-       if (!get_gpio_location(ent, &reg, &shift, &mask))
-               return -ENODEV;
-
-       value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
-       mask = ~(mask << shift);
-
-       NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
-
+       nv_wr32(dev, 0x001140, 0x00000000);
+       nv_wr32(dev, 0x001100, 0xffffffff);
+       nv_wr32(dev, 0x001144, 0x00000000);
+       nv_wr32(dev, 0x001104, 0xffffffff);
+       nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
        return 0;
 }
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+       nv_wr32(dev, 0x001140, 0x00000000);
+       nv_wr32(dev, 0x001144, 0x00000000);
+       nouveau_irq_unregister(dev, 28);
+}
index 3900cebba560e2fb822d9193a796bfc8dd90dfee..696d7e7dc2a07facbfed838a637600ad5c15d880 100644 (file)
@@ -30,6 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 #include "nv17_tv.h"
 
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
        uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
                fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        head = (dacclk & 0x100) >> 8;
 
        /* Save the previous state. */
-       gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
-       gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+       gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+       gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
        fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
        fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
        fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
        /* Prepare the DAC for load detection.  */
-       gpio->set(dev, DCB_GPIO_TVDAC1, true);
-       gpio->set(dev, DCB_GPIO_TVDAC0, true);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
 
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-       gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
-       gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
 
        return sample;
 }
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
 static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
        struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -383,8 +381,8 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 
        nv_load_ptv(dev, regs, 200);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
-       gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
 
        nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
index e676b0d534786ee140017691b83c4580e0e6bbc3..c7615381c5d9e287a090784c1a79f183ad0605f4 100644 (file)
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
        return true;
 }
 
-void
+int
 nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
        struct bit_entry M;
        u32 crtc_mask = 0;
        u8 sr1[2];
-       int i;
+       int i, ret = -EAGAIN;
 
        /* determine which CRTCs are active, fetch VGA_SR1 for each */
        for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
        if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
                goto resume;
 
+       ret = 0;
+
        /* set engine clocks */
        nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
        nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        kfree(info);
+       return ret;
+}
+
+int
+nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+       if (line == 2) {
+               u32 reg = nv_rd32(dev, 0x0010f0);
+               if (reg & 0x80000000) {
+                       *duty = (reg & 0x7fff0000) >> 16;
+                       *divs = (reg & 0x00007fff);
+                       return 0;
+               }
+       } else
+       if (line == 9) {
+               u32 reg = nv_rd32(dev, 0x0015f4);
+               if (reg & 0x80000000) {
+                       *divs = nv_rd32(dev, 0x0015f8);
+                       *duty = (reg & 0x7fffffff);
+                       return 0;
+               }
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+               return -ENODEV;
+       }
+
+       return -EINVAL;
+}
+
+int
+nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+       if (line == 2) {
+               nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+       } else
+       if (line == 9) {
+               nv_wr32(dev, 0x0015f8, divs);
+               nv_wr32(dev, 0x0015f4, duty | 0x80000000);
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+               return -ENODEV;
+       }
+
+       return 0;
 }
index 882080e0b4f5c2932db3ccfa91976de88f92d3ba..8f6c2ace3adf5c48b40863b34b240ceca88082ea 100644 (file)
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 }
 
 static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG_KMS(dev, "\n");
+       struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       int head = nv_crtc->index, ret;
+       u32 mode = 0x00;
 
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret) {
-               NV_ERROR(dev, "no space while setting dither\n");
-               return ret;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
        }
 
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
-       if (on)
-               OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
-       else
-               OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
+       }
 
-       if (update) {
-               BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
+       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+               OUT_RING  (evo, mode);
+               if (update) {
+                       BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+                       OUT_RING  (evo, 0);
+                       FIRE_RING (evo);
+               }
        }
 
-       return 0;
+       return ret;
 }
 
 struct nouveau_connector *
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 }
 
 static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_connector *nv_connector =
-               nouveau_crtc_connector_get(nv_crtc);
-       struct drm_device *dev = nv_crtc->base.dev;
+       struct nouveau_connector *nv_connector;
+       struct drm_crtc *crtc = &nv_crtc->base;
+       struct drm_device *dev = crtc->dev;
        struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_display_mode *native_mode = NULL;
-       struct drm_display_mode *mode = &nv_crtc->base.mode;
-       uint32_t outX, outY, horiz, vert;
-       int ret;
+       struct drm_display_mode *umode = &crtc->mode;
+       struct drm_display_mode *omode;
+       int scaling_mode, ret;
+       u32 ctrl = 0, oX, oY;
 
        NV_DEBUG_KMS(dev, "\n");
 
-       switch (scaling_mode) {
-       case DRM_MODE_SCALE_NONE:
-               break;
-       default:
-               if (!nv_connector || !nv_connector->native_mode) {
-                       NV_ERROR(dev, "No native mode, forcing panel scaling\n");
-                       scaling_mode = DRM_MODE_SCALE_NONE;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       if (!nv_connector || !nv_connector->native_mode) {
+               NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+               scaling_mode = DRM_MODE_SCALE_NONE;
+       } else {
+               scaling_mode = nv_connector->scaling_mode;
+       }
+
+       /* start off at the resolution we programmed the crtc for, this
+        * effectively handles NONE/FULL scaling
+        */
+       if (scaling_mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary, will keep the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                } else {
-                       native_mode = nv_connector->native_mode;
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-               break;
        }
 
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
        switch (scaling_mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
        case DRM_MODE_SCALE_ASPECT:
-               horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
-               vert = (native_mode->vdisplay << 19) / mode->vdisplay;
-
-               if (vert > horiz) {
-                       outX = (mode->hdisplay * horiz) >> 19;
-                       outY = (mode->vdisplay * horiz) >> 19;
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
                } else {
-                       outX = (mode->hdisplay * vert) >> 19;
-                       outY = (mode->vdisplay * vert) >> 19;
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
                break;
-       case DRM_MODE_SCALE_FULLSCREEN:
-               outX = native_mode->hdisplay;
-               outY = native_mode->vdisplay;
-               break;
-       case DRM_MODE_SCALE_CENTER:
-       case DRM_MODE_SCALE_NONE:
        default:
-               outX = mode->hdisplay;
-               outY = mode->vdisplay;
                break;
        }
 
-       ret = RING_SPACE(evo, update ? 7 : 5);
+       if (umode->hdisplay != oX || umode->vdisplay != oY ||
+           umode->flags & DRM_MODE_FLAG_INTERLACE ||
+           umode->flags & DRM_MODE_FLAG_DBLSCAN)
+               ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
+
+       ret = RING_SPACE(evo, 5);
        if (ret)
                return ret;
 
-       /* Got a better name for SCALER_ACTIVE? */
-       /* One day i've got to really figure out why this is needed. */
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
-       if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
-           (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-           mode->hdisplay != outX || mode->vdisplay != outY) {
-               OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
-       } else {
-               OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
-       }
-
+       OUT_RING  (evo, ctrl);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
-       OUT_RING(evo, outY << 16 | outX);
-       OUT_RING(evo, outY << 16 | outX);
+       OUT_RING  (evo, oY << 16 | oX);
+       OUT_RING  (evo, oY << 16 | oX);
 
        if (update) {
-               BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
+               nv50_display_flip_stop(crtc);
+               nv50_display_sync(dev);
+               nv50_display_flip_next(crtc, crtc->fb, NULL);
        }
 
        return 0;
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
        nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       kfree(nv_crtc->mode);
        kfree(nv_crtc);
 }
 
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 }
 
-static int
-nv50_crtc_wait_complete(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       u64 start;
-       int ret;
-
-       ret = RING_SPACE(evo, 6);
-       if (ret)
-               return ret;
-       BEGIN_RING(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x80000000);
-       BEGIN_RING(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0);
-       BEGIN_RING(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x00000000);
-
-       nv_wo32(disp->ntfy, 0x000, 0x00000000);
-       FIRE_RING (evo);
-
-       start = ptimer->read(dev);
-       do {
-               if (nv_ro32(disp->ntfy, 0x000))
-                       return 0;
-       } while (ptimer->read(dev) - start < 2000000000ULL);
-
-       return -EBUSY;
-}
-
 static void
 nv50_crtc_prepare(struct drm_crtc *crtc)
 {
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
 
        nv50_crtc_blank(nv_crtc, false);
        drm_vblank_post_modeset(dev, nv_crtc->index);
-       nv50_crtc_wait_complete(crtc);
+       nv50_display_sync(dev);
        nv50_display_flip_next(crtc, crtc->fb, NULL);
 }
 
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 }
 
 static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                  struct drm_display_mode *adjusted_mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+                  struct drm_display_mode *mode, int x, int y,
                   struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
        struct nouveau_channel *evo = nv50_display(dev)->master;
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_connector *nv_connector = NULL;
-       uint32_t hsync_dur,  vsync_dur, hsync_start_to_end, vsync_start_to_end;
-       uint32_t hunk1, vunk1, vunk2a, vunk2b;
+       u32 head = nv_crtc->index * 0x400;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
        int ret;
 
-       /* Find the connector attached to this CRTC */
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-
-       *nv_crtc->mode = *adjusted_mode;
-
-       NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+       /* hw timing description looks like this:
+        *
+        * <sync> <back porch> <---------display---------> <front porch>
+        * ______
+        *       |____________|---------------------------|____________|
+        *
+        *       ^ synce      ^ blanke                    ^ blanks     ^ active
+        *
+        * interlaced modes also have 2 additional values pointing at the end
+        * and start of the next field's blanking period.
+        */
 
-       hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
-       vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
-       hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
-       vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
-       /* I can't give this a proper name, anyone else can? */
-       hunk1 = adjusted_mode->htotal -
-               adjusted_mode->hsync_start + adjusted_mode->hdisplay;
-       vunk1 = adjusted_mode->vtotal -
-               adjusted_mode->vsync_start + adjusted_mode->vdisplay;
-       /* Another strange value, this time only for interlaced adjusted_modes. */
-       vunk2a = 2 * adjusted_mode->vtotal -
-                adjusted_mode->vsync_start + adjusted_mode->vdisplay;
-       vunk2b = adjusted_mode->vtotal -
-                adjusted_mode->vsync_start + adjusted_mode->vtotal;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vsync_dur /= 2;
-               vsync_start_to_end  /= 2;
-               vunk1 /= 2;
-               vunk2a /= 2;
-               vunk2b /= 2;
-               /* magic */
-               if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
-                       vsync_start_to_end -= 1;
-                       vunk1 -= 1;
-                       vunk2a -= 1;
-                       vunk2b -= 1;
-               }
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
        }
 
-       ret = RING_SPACE(evo, 17);
-       if (ret)
-               return ret;
-
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
-       OUT_RING(evo, adjusted_mode->clock | 0x800000);
-       OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
-
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
-       OUT_RING(evo, 0);
-       OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
-       OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
-       OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
-                       (hsync_start_to_end - 1));
-       OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
-               OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
-       } else {
-               OUT_RING(evo, 0);
-               OUT_RING(evo, 0);
+       ret = RING_SPACE(evo, 18);
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, 0x0804 + head, 2);
+               OUT_RING  (evo, 0x00800000 | mode->clock);
+               OUT_RING  (evo, (ilace == 2) ? 2 : 0);
+               BEGIN_RING(evo, 0, 0x0810 + head, 6);
+               OUT_RING  (evo, 0x00000000); /* border colour */
+               OUT_RING  (evo, (vactive << 16) | hactive);
+               OUT_RING  (evo, ( vsynce << 16) | hsynce);
+               OUT_RING  (evo, (vblanke << 16) | hblanke);
+               OUT_RING  (evo, (vblanks << 16) | hblanks);
+               OUT_RING  (evo, (vblan2e << 16) | vblan2s);
+               BEGIN_RING(evo, 0, 0x082c + head, 1);
+               OUT_RING  (evo, 0x00000000);
+               BEGIN_RING(evo, 0, 0x0900 + head, 1);
+               OUT_RING  (evo, 0x00000311); /* makes sync channel work */
+               BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+               OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
+               BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+               OUT_RING  (evo, 0x00000000); /* screen position */
        }
 
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
-       OUT_RING(evo, 0);
-
-       /* This is the actual resolution of the mode. */
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
-       OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
-       OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
-
-       nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
-       nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+       nv_crtc->set_dither(nv_crtc, false);
+       nv_crtc->set_scale(nv_crtc, false);
 
        return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        if (ret)
                return ret;
 
-       ret = nv50_crtc_wait_complete(crtc);
+       ret = nv50_display_sync(crtc->dev);
        if (ret)
                return ret;
 
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
        if (ret)
                return ret;
 
-       return nv50_crtc_wait_complete(crtc);
+       return nv50_display_sync(crtc->dev);
 }
 
 static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
        if (!nv_crtc)
                return -ENOMEM;
 
-       nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
-       if (!nv_crtc->mode) {
-               kfree(nv_crtc);
-               return -ENOMEM;
-       }
-
        /* Default CLUT parameters, will be activated on the hw upon
         * first mode set.
         */
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
        }
 
        if (ret) {
-               kfree(nv_crtc->mode);
                kfree(nv_crtc);
                return ret;
        }
index 808f3ec8f82703ef59fb9daf6447b299cdf74389..a0f2bebf49e302e3990b89146523e84695c25d3d 100644 (file)
@@ -199,11 +199,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        return true;
 }
 
-static void
-nv50_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
 static void
 nv50_dac_commit(struct drm_encoder *encoder)
 {
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
        .save = nv50_dac_save,
        .restore = nv50_dac_restore,
        .mode_fixup = nv50_dac_mode_fixup,
-       .prepare = nv50_dac_prepare,
+       .prepare = nv50_dac_disconnect,
        .commit = nv50_dac_commit,
        .mode_set = nv50_dac_mode_set,
        .get_crtc = nv50_dac_crtc_get,
index 06de250fe617df89ad4e05a34d3e05be8907f126..7ba28e08ee3198de2094c12e1c2376f53c7934f4 100644 (file)
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
        return 4;
 }
 
+static int
+evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
+{
+       int ret = 0;
+       nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
+       nv_wr32(dev, 0x610304 + (ch * 0x08), data);
+       nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
+       if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
+               ret = -EBUSY;
+       if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
+               NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
+       nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
+       return ret;
+}
+
 int
 nv50_display_early_init(struct drm_device *dev)
 {
+       u32 ctrl = nv_rd32(dev, 0x610200);
+       int i;
+
+       /* check if master evo channel is already active, a good a sign as any
+        * that the display engine is in a weird state (hibernate/kexec), if
+        * it is, do our best to reset the display engine...
+        */
+       if ((ctrl & 0x00000003) == 0x00000003) {
+               NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
+
+               /* deactivate both heads first, PDISP will disappear forever
+                * (well, until you power cycle) on some boards as soon as
+                * PMC_ENABLE is hit unless they are..
+                */
+               for (i = 0; i < 2; i++) {
+                       evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
+                       evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
+               }
+               evo_icmd(dev, 0, 0x0080, 0);
+
+               /* reset PDISP */
+               nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
+               nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
+       }
+
        return 0;
 }
 
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
 }
 
 int
-nv50_display_init(struct drm_device *dev)
+nv50_display_sync(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct drm_connector *connector;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct nv50_display *disp = nv50_display(dev);
+       struct nouveau_channel *evo = disp->master;
+       u64 start;
+       int ret;
+
+       ret = RING_SPACE(evo, 6);
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x80000000);
+               BEGIN_RING(evo, 0, 0x0080, 1);
+               OUT_RING  (evo, 0);
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x00000000);
+
+               nv_wo32(disp->ntfy, 0x000, 0x00000000);
+               FIRE_RING (evo);
+
+               start = ptimer->read(dev);
+               do {
+                       if (nv_ro32(disp->ntfy, 0x000))
+                               return 0;
+               } while (ptimer->read(dev) - start < 2000000000ULL);
+       }
+
+       return -EBUSY;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
        struct nouveau_channel *evo;
        int ret, i;
        u32 val;
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
                     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
                     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
 
-       /* enable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct nouveau_connector *conn = nouveau_connector(connector);
-
-               if (conn->dcb->gpio_tag == 0xff)
-                       continue;
-
-               pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
-       }
-
        ret = nv50_evo_init(dev);
        if (ret)
                return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
 
        nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
-       ret = RING_SPACE(evo, 15);
+       ret = RING_SPACE(evo, 3);
        if (ret)
                return ret;
        BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
-       OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-       OUT_RING(evo, NvEvoSync);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
-       OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
-       OUT_RING(evo, 0);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
-       OUT_RING(evo, 0);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
-       OUT_RING(evo, 0);
-       /* required to make display sync channels not hate life */
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
-       OUT_RING  (evo, 0x00000311);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
-       OUT_RING  (evo, 0x00000311);
-       FIRE_RING(evo);
-       if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
-               NV_ERROR(dev, "evo pushbuf stalled\n");
-
+       OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+       OUT_RING  (evo, NvEvoSync);
 
-       return 0;
+       return nv50_display_sync(dev);
 }
 
-static int nv50_display_disable(struct drm_device *dev)
+void
+nv50_display_fini(struct drm_device *dev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        struct nouveau_channel *evo = disp->master;
        struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
 
        /* disable interrupts. */
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-
-       /* disable hotplug interrupts */
-       nv_wr32(dev, 0xe054, 0xffffffff);
-       nv_wr32(dev, 0xe050, 0x00000000);
-       if (dev_priv->chipset >= 0x90) {
-               nv_wr32(dev, 0xe074, 0xffffffff);
-               nv_wr32(dev, 0xe070, 0x00000000);
-       }
-       return 0;
 }
 
-int nv50_display_create(struct drm_device *dev)
+int
+nv50_display_create(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
        tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
        nouveau_irq_register(dev, 26, nv50_display_isr);
 
-       ret = nv50_display_init(dev);
+       ret = nv50_evo_create(dev);
        if (ret) {
                nv50_display_destroy(dev);
                return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
 
        NV_DEBUG_KMS(dev, "\n");
 
-       nv50_display_disable(dev);
+       nv50_evo_destroy(dev);
        nouveau_irq_unregister(dev, 26);
        kfree(disp);
 }
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
                } else {
                        /* determine number of lvds links */
                        if (nv_connector && nv_connector->edid &&
-                           nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+                           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
                                /* http://www.spwg.org */
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        script |= 0x0100;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
        if (crtc >= 0) {
                pclk  = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
                pclk &= 0x003fffff;
-
-               nv50_crtc_set_clock(dev, crtc, pclk);
+               if (pclk)
+                       nv50_crtc_set_clock(dev, crtc, pclk);
 
                tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
                tmp &= ~0x000000f;
index c2da503a22aa589bac21db214130ede72aebb8bf..95874f7c043cf3aad9e80de35712cc9d1aad72f6 100644 (file)
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
 int nv50_display_init(struct drm_device *dev);
+void nv50_display_fini(struct drm_device *dev);
 void nv50_display_destroy(struct drm_device *dev);
 int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
 int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
 
+int  nv50_display_sync(struct drm_device *);
 int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
                            struct nouveau_channel *chan);
 void nv50_display_flip_stop(struct drm_crtc *);
 
+int  nv50_evo_create(struct drm_device *dev);
+void nv50_evo_destroy(struct drm_device *dev);
 int  nv50_evo_init(struct drm_device *dev);
 void nv50_evo_fini(struct drm_device *dev);
 void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
index c99d9751880c56d6012b06d36858381550f23685..9b962e989d7c6914001ae20b3d59bde7e830fdfe 100644 (file)
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
        }
 }
 
-static void
+void
 nv50_evo_destroy(struct drm_device *dev)
 {
        struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
        nv50_evo_channel_del(&disp->master);
 }
 
-static int
+int
 nv50_evo_create(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        int ret, i;
 
-       if (!disp->master) {
-               ret = nv50_evo_create(dev);
-               if (ret)
-                       return ret;
-       }
-
        ret = nv50_evo_channel_init(disp->master);
        if (ret)
                return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
 
        if (disp->master)
                nv50_evo_channel_fini(disp->master);
-
-       nv50_evo_destroy(dev);
 }
index c34a074f7ea17761dee36e2943ce3b656a609b0d..3bc2a565c20be72dc2d43b9aa360c542818a24dd 100644 (file)
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = NULL;
+        uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
        unsigned long flags;
        int ret;
 
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        nv_wo32(ramfc, 0x7c, 0x30000001);
        nv_wo32(ramfc, 0x78, 0x00000000);
        nv_wo32(ramfc, 0x3c, 0x403f6078);
-       nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
-       nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
+       nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+       nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+                drm_order(chan->dma.ib_max + 1) << 16);
 
        if (dev_priv->chipset != 0x50) {
                nv_wo32(chan->ramin, 0, chan->id);
index 793a5ccca12157319e5393490fd0783dc97c6b59..f429e6a8ca7aeba09b3f8ef852376ea8d360c4ee 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 
 #include "nv50_display.h"
 
-static void nv50_gpio_isr(struct drm_device *dev);
-static void nv50_gpio_isr_bh(struct work_struct *work);
-
-struct nv50_gpio_priv {
-       struct list_head handlers;
-       spinlock_t lock;
-};
-
-struct nv50_gpio_handler {
-       struct drm_device *dev;
-       struct list_head head;
-       struct work_struct work;
-       bool inhibit;
-
-       struct dcb_gpio_entry *gpio;
-
-       void (*handler)(void *data, int state);
-       void *data;
-};
-
 static int
-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
 {
        const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
 
-       if (gpio->line >= 32)
+       if (line >= 32)
                return -EINVAL;
 
-       *reg = nv50_gpio_reg[gpio->line >> 3];
-       *shift = (gpio->line & 7) << 2;
+       *reg = nv50_gpio_reg[line >> 3];
+       *shift = (line & 7) << 2;
        return 0;
 }
 
 int
-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
 {
-       struct dcb_gpio_entry *gpio;
-       uint32_t r, s, v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg, shift;
 
-       if (nv50_gpio_location(gpio, &r, &s))
+       if (nv50_gpio_location(line, &reg, &shift))
                return -EINVAL;
 
-       v = nv_rd32(dev, r) >> (s + 2);
-       return ((v & 1) == (gpio->state[1] & 1));
+       nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+       return 0;
 }
 
 int
-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv50_gpio_sense(struct drm_device *dev, int line)
 {
-       struct dcb_gpio_entry *gpio;
-       uint32_t r, s, v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg, shift;
 
-       if (nv50_gpio_location(gpio, &r, &s))
+       if (nv50_gpio_location(line, &reg, &shift))
                return -EINVAL;
 
-       v  = nv_rd32(dev, r) & ~(0x3 << s);
-       v |= (gpio->state[state] ^ 2) << s;
-       nv_wr32(dev, r, v);
-       return 0;
+       return !!(nv_rd32(dev, reg) & (4 << shift));
 }
 
-int
-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
 {
-       struct dcb_gpio_entry *gpio;
-       u32 v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg  = line < 16 ? 0xe050 : 0xe070;
+       u32 mask = 0x00010001 << (line & 0xf);
 
-       v  = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
-       v &= 0x00004000;
-       return (!!v == (gpio->state[1] & 1));
+       nv_wr32(dev, reg + 4, mask);
+       nv_mask(dev, reg + 0, mask, on ? mask : 0);
 }
 
 int
-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
 {
-       struct dcb_gpio_entry *gpio;
-       u32 v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
-
-       v = gpio->state[state] ^ 2;
-
-       nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+       u32 data = ((dir ^ 1) << 13) | (out << 12);
+       nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+       nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
        return 0;
 }
 
 int
-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
-                      void (*handler)(void *, int), void *data)
+nvd0_gpio_sense(struct drm_device *dev, int line)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh;
-       struct dcb_gpio_entry *gpio;
-       unsigned long flags;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
-
-       gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
-       if (!gpioh)
-               return -ENOMEM;
-
-       INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
-       gpioh->dev  = dev;
-       gpioh->gpio = gpio;
-       gpioh->handler = handler;
-       gpioh->data = data;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       list_add(&gpioh->head, &priv->handlers);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return 0;
+       return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
 }
 
-void
-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
-                        void (*handler)(void *, int), void *data)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh, *tmp;
-       struct dcb_gpio_entry *gpio;
-       LIST_HEAD(tofree);
-       unsigned long flags;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
-               if (gpioh->gpio != gpio ||
-                   gpioh->handler != handler ||
-                   gpioh->data != data)
-                       continue;
-               list_move(&gpioh->head, &tofree);
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
-               flush_work_sync(&gpioh->work);
-               kfree(gpioh);
-       }
-}
-
-bool
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
-{
-       struct dcb_gpio_entry *gpio;
-       u32 reg, mask;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return false;
-
-       reg  = gpio->line < 16 ? 0xe050 : 0xe070;
-       mask = 0x00010001 << (gpio->line & 0xf);
-
-       nv_wr32(dev, reg + 4, mask);
-       reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
-       return (reg & mask) == mask;
-}
-
-static int
-nv50_gpio_create(struct drm_device *dev)
+static void
+nv50_gpio_isr(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv;
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       u32 intr0, intr1 = 0;
+       u32 hi, lo;
 
-       INIT_LIST_HEAD(&priv->handlers);
-       spin_lock_init(&priv->lock);
-       pgpio->priv = priv;
-       return 0;
-}
+       intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+       if (dev_priv->chipset >= 0x90)
+               intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
 
-static void
-nv50_gpio_destroy(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+       lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+       nouveau_gpio_isr(dev, 0, hi | lo);
 
-       kfree(pgpio->priv);
-       pgpio->priv = NULL;
+       nv_wr32(dev, 0xe054, intr0);
+       if (dev_priv->chipset >= 0x90)
+               nv_wr32(dev, 0xe074, intr1);
 }
 
 int
 nv50_gpio_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       int ret;
-
-       if (!pgpio->priv) {
-               ret = nv50_gpio_create(dev);
-               if (ret)
-                       return ret;
-       }
 
        /* disable, and ack any pending gpio interrupts */
        nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
        if (dev_priv->chipset >= 0x90)
                nv_wr32(dev, 0xe070, 0x00000000);
        nouveau_irq_unregister(dev, 21);
-
-       nv50_gpio_destroy(dev);
-}
-
-static void
-nv50_gpio_isr_bh(struct work_struct *work)
-{
-       struct nv50_gpio_handler *gpioh =
-               container_of(work, struct nv50_gpio_handler, work);
-       struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       unsigned long flags;
-       int state;
-
-       state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
-       if (state < 0)
-               return;
-
-       gpioh->handler(gpioh->data, state);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       gpioh->inhibit = false;
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh;
-       u32 intr0, intr1 = 0;
-       u32 hi, lo, ch;
-
-       intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
-       if (dev_priv->chipset >= 0x90)
-               intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
-       hi = (intr0 & 0x0000ffff) | (intr1 << 16);
-       lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-       ch = hi | lo;
-
-       nv_wr32(dev, 0xe054, intr0);
-       if (dev_priv->chipset >= 0x90)
-               nv_wr32(dev, 0xe074, intr1);
-
-       spin_lock(&priv->lock);
-       list_for_each_entry(gpioh, &priv->handlers, head) {
-               if (!(ch & (1 << gpioh->gpio->line)))
-                       continue;
-
-               if (gpioh->inhibit)
-                       continue;
-               gpioh->inhibit = true;
-
-               schedule_work(&gpioh->work);
-       }
-       spin_unlock(&priv->lock);
 }
index ac601f7c4e1a01c7175c9a201f3474e958ef98b7..33d5711a918d659aa000789fd43748bcd020c857 100644 (file)
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                        }
                        break;
                case 7: /* MP error */
-                       if (ustatus & 0x00010000) {
+                       if (ustatus & 0x04030000) {
                                nv50_pgraph_mp_trap(dev, i, display);
-                               ustatus &= ~0x00010000;
+                               ustatus &= ~0x04030000;
                        }
                        break;
                case 8: /* TPDMA error */
index 3d5a86b98282b6ee74b046a600fe19a6778a0e71..03937212e9d81c97a3595e269cadc13c0f4eea97 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_bios.h"
+#include "nouveau_hw.h"
 #include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
 
-struct nv50_pm_state {
-       struct nouveau_pm_level *perflvl;
-       struct pll_lims pll;
-       enum pll_types type;
-       int N, M, P;
+enum clk_src {
+       clk_src_crystal,
+       clk_src_href,
+       clk_src_hclk,
+       clk_src_hclkm3,
+       clk_src_hclkm3d2,
+       clk_src_host,
+       clk_src_nvclk,
+       clk_src_sclk,
+       clk_src_mclk,
+       clk_src_vdec,
+       clk_src_dom6
 };
 
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       switch (dev_priv->chipset) {
+       case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+       case 0x84:
+       case 0x86:
+       case 0x98:
+       case 0xa0:
+               return nv_rd32(dev, 0x004700);
+       case 0x92:
+       case 0x94:
+       case 0x96:
+               return nv_rd32(dev, 0x004800);
+       default:
+               return 0x00000000;
+       }
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 coef, ref = read_clk(dev, clk_src_crystal);
+       u32 rsel = nv_rd32(dev, 0x00e18c);
+       int P, N, M, id;
+
+       switch (dev_priv->chipset) {
+       case 0x50:
+       case 0xa0:
+               switch (base) {
+               case 0x4020:
+               case 0x4028: id = !!(rsel & 0x00000004); break;
+               case 0x4008: id = !!(rsel & 0x00000008); break;
+               case 0x4030: id = 0; break;
+               default:
+                       NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+                       return 0;
+               }
+
+               coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+               ref *=  (coef & 0x01000000) ? 2 : 4;
+               P    =  (coef & 0x00070000) >> 16;
+               N    = ((coef & 0x0000ff00) >> 8) + 1;
+               M    = ((coef & 0x000000ff) >> 0) + 1;
+               break;
+       case 0x84:
+       case 0x86:
+       case 0x92:
+               coef = nv_rd32(dev, 0x00e81c);
+               P    = (coef & 0x00070000) >> 16;
+               N    = (coef & 0x0000ff00) >> 8;
+               M    = (coef & 0x000000ff) >> 0;
+               break;
+       case 0x94:
+       case 0x96:
+       case 0x98:
+               rsel = nv_rd32(dev, 0x00c050);
+               switch (base) {
+               case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+               case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+               case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+               case 0x4030: rsel = 3; break;
+               default:
+                       NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+                       return 0;
+               }
+
+               switch (rsel) {
+               case 0: id = 1; break;
+               case 1: return read_clk(dev, clk_src_crystal);
+               case 2: return read_clk(dev, clk_src_href);
+               case 3: id = 0; break;
+               }
+
+               coef =  nv_rd32(dev, 0x00e81c + (id * 0x28));
+               P    = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+               P   += (coef & 0x00070000) >> 16;
+               N    = (coef & 0x0000ff00) >> 8;
+               M    = (coef & 0x000000ff) >> 0;
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       if (M)
+               return (ref * N / M) >> P;
+       return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+       u32 src, mast = nv_rd32(dev, 0x00c040);
+
+       switch (base) {
+       case 0x004028:
+               src = !!(mast & 0x00200000);
+               break;
+       case 0x004020:
+               src = !!(mast & 0x00400000);
+               break;
+       case 0x004008:
+               src = !!(mast & 0x00010000);
+               break;
+       case 0x004030:
+               src = !!(mast & 0x02000000);
+               break;
+       case 0x00e810:
+               return read_clk(dev, clk_src_crystal);
+       default:
+               NV_ERROR(dev, "bad pll 0x%06x\n", base);
+               return 0;
+       }
+
+       if (src)
+               return read_clk(dev, clk_src_href);
+       return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 ctrl = nv_rd32(dev, base + 0);
+       u32 coef = nv_rd32(dev, base + 4);
+       u32 ref = read_pll_ref(dev, base);
+       u32 clk = 0;
+       int N1, N2, M1, M2;
+
+       if (base == 0x004028 && (mast & 0x00100000)) {
+               /* wtf, appears to only disable post-divider on nva0 */
+               if (dev_priv->chipset != 0xa0)
+                       return read_clk(dev, clk_src_dom6);
+       }
+
+       N2 = (coef & 0xff000000) >> 24;
+       M2 = (coef & 0x00ff0000) >> 16;
+       N1 = (coef & 0x0000ff00) >> 8;
+       M1 = (coef & 0x000000ff);
+       if ((ctrl & 0x80000000) && M1) {
+               clk = ref * N1 / M1;
+               if ((ctrl & 0x40000100) == 0x40000000) {
+                       if (M2)
+                               clk = clk * N2 / M2;
+                       else
+                               clk = 0;
+               }
+       }
+
+       return clk;
+}
+
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 P = 0;
+
+       switch (src) {
+       case clk_src_crystal:
+               return dev_priv->crystal;
+       case clk_src_href:
+               return 100000; /* PCIE reference clock */
+       case clk_src_hclk:
+               return read_clk(dev, clk_src_href) * 27778 / 10000;
+       case clk_src_hclkm3:
+               return read_clk(dev, clk_src_hclk) * 3;
+       case clk_src_hclkm3d2:
+               return read_clk(dev, clk_src_hclk) * 3 / 2;
+       case clk_src_host:
+               switch (mast & 0x30000000) {
+               case 0x00000000: return read_clk(dev, clk_src_href);
+               case 0x10000000: break;
+               case 0x20000000: /* !0x50 */
+               case 0x30000000: return read_clk(dev, clk_src_hclk);
+               }
+               break;
+       case clk_src_nvclk:
+               if (!(mast & 0x00100000))
+                       P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+               switch (mast & 0x00000003) {
+               case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+               case 0x00000001: return read_clk(dev, clk_src_dom6);
+               case 0x00000002: return read_pll(dev, 0x004020) >> P;
+               case 0x00000003: return read_pll(dev, 0x004028) >> P;
+               }
+               break;
+       case clk_src_sclk:
+               P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+               switch (mast & 0x00000030) {
+               case 0x00000000:
+                       if (mast & 0x00000080)
+                               return read_clk(dev, clk_src_host) >> P;
+                       return read_clk(dev, clk_src_crystal) >> P;
+               case 0x00000010: break;
+               case 0x00000020: return read_pll(dev, 0x004028) >> P;
+               case 0x00000030: return read_pll(dev, 0x004020) >> P;
+               }
+               break;
+       case clk_src_mclk:
+               P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
+               if (nv_rd32(dev, 0x004008) & 0x00000200) {
+                       switch (mast & 0x0000c000) {
+                       case 0x00000000:
+                               return read_clk(dev, clk_src_crystal) >> P;
+                       case 0x00008000:
+                       case 0x0000c000:
+                               return read_clk(dev, clk_src_href) >> P;
+                       }
+               } else {
+                       return read_pll(dev, 0x004008) >> P;
+               }
+               break;
+       case clk_src_vdec:
+               P = (read_div(dev) & 0x00000700) >> 8;
+               switch (dev_priv->chipset) {
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0xa0:
+                       switch (mast & 0x00000c00) {
+                       case 0x00000000:
+                               if (dev_priv->chipset == 0xa0) /* wtf?? */
+                                       return read_clk(dev, clk_src_nvclk) >> P;
+                               return read_clk(dev, clk_src_crystal) >> P;
+                       case 0x00000400:
+                               return 0;
+                       case 0x00000800:
+                               if (mast & 0x01000000)
+                                       return read_pll(dev, 0x004028) >> P;
+                               return read_pll(dev, 0x004030) >> P;
+                       case 0x00000c00:
+                               return read_clk(dev, clk_src_nvclk) >> P;
+                       }
+                       break;
+               case 0x98:
+                       switch (mast & 0x00000c00) {
+                       case 0x00000000:
+                               return read_clk(dev, clk_src_nvclk) >> P;
+                       case 0x00000400:
+                               return 0;
+                       case 0x00000800:
+                               return read_clk(dev, clk_src_hclkm3d2) >> P;
+                       case 0x00000c00:
+                               return read_clk(dev, clk_src_mclk) >> P;
+                       }
+                       break;
+               }
+               break;
+       case clk_src_dom6:
+               switch (dev_priv->chipset) {
+               case 0x50:
+               case 0xa0:
+                       return read_pll(dev, 0x00e810) >> 2;
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0x98:
+                       P = (read_div(dev) & 0x00000007) >> 0;
+                       switch (mast & 0x0c000000) {
+                       case 0x00000000: return read_clk(dev, clk_src_href);
+                       case 0x04000000: break;
+                       case 0x08000000: return read_clk(dev, clk_src_hclk);
+                       case 0x0c000000:
+                               return read_clk(dev, clk_src_hclkm3) >> P;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       default:
+               break;
+       }
+
+       NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+       return 0;
+}
+
 int
-nv50_pm_clock_get(struct drm_device *dev, u32 id)
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct pll_lims pll;
-       int P, N, M, ret;
-       u32 reg0, reg1;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       if (dev_priv->chipset == 0xaa ||
+           dev_priv->chipset == 0xac)
+               return 0;
 
-       ret = get_pll_limits(dev, id, &pll);
+       perflvl->core   = read_clk(dev, clk_src_nvclk);
+       perflvl->shader = read_clk(dev, clk_src_sclk);
+       perflvl->memory = read_clk(dev, clk_src_mclk);
+       if (dev_priv->chipset != 0x50) {
+               perflvl->vdec = read_clk(dev, clk_src_vdec);
+               perflvl->dom6 = read_clk(dev, clk_src_dom6);
+       }
+
+       return 0;
+}
+
+struct nv50_pm_state {
+       struct hwsq_ucode mclk_hwsq;
+       u32 mscript;
+
+       u32 emast;
+       u32 nctrl;
+       u32 ncoef;
+       u32 sctrl;
+       u32 scoef;
+
+       u32 amast;
+       u32 pdivs;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+        u32 clk, int *N1, int *M1, int *log2P)
+{
+       struct nouveau_pll_vals coef;
+       int ret;
+
+       ret = get_pll_limits(dev, reg, pll);
        if (ret)
-               return ret;
+               return 0;
+
+       pll->vco2.maxfreq = 0;
+       pll->refclk = read_pll_ref(dev, reg);
+       if (!pll->refclk)
+               return 0;
 
-       reg0 = nv_rd32(dev, pll.reg + 0);
-       reg1 = nv_rd32(dev, pll.reg + 4);
+       ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+       if (ret == 0)
+               return 0;
 
-       if ((reg0 & 0x80000000) == 0) {
-               if (id == PLL_SHADER) {
-                       NV_DEBUG(dev, "Shader PLL is disabled. "
-                               "Shader clock is twice the core\n");
-                       ret = nv50_pm_clock_get(dev, PLL_CORE);
-                       if (ret > 0)
-                               return ret << 1;
-               } else if (id == PLL_MEMORY) {
-                       NV_DEBUG(dev, "Memory PLL is disabled. "
-                               "Memory clock is equal to the ref_clk\n");
-                       return pll.refclk;
+       *N1 = coef.N1;
+       *M1 = coef.M1;
+       *log2P = coef.log2P;
+       return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+       u32 clk0 = src, clk1 = src;
+       for (*div = 0; *div <= 7; (*div)++) {
+               if (clk0 <= target) {
+                       clk1 = clk0 << (*div ? 1 : 0);
+                       break;
                }
+               clk0 >>= 1;
+       }
+
+       if (target - clk0 <= clk1 - target)
+               return clk0;
+       (*div)--;
+       return clk1;
+}
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+       return ((a / 1000) == (b / 1000));
+}
+
+static int
+calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct pll_lims pll;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 ctrl = nv_rd32(dev, 0x004008);
+       u32 coef = nv_rd32(dev, 0x00400c);
+       u32 orig = ctrl;
+       u32 crtc_mask = 0;
+       int N, M, P;
+       int ret, i;
+
+       /* use pcie refclock if possible, otherwise use mpll */
+       ctrl &= ~0x81ff0200;
+       if (clk_same(freq, read_clk(dev, clk_src_href))) {
+               ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+       } else {
+               ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+               if (ret == 0)
+                       return -EINVAL;
+
+               ctrl |= 0x80000000 | (P << 22) | (P << 16);
+               ctrl |= pll.log2p_bias << 19;
+               coef  = (N << 8) | M;
+       }
+
+       mast &= ~0xc0000000; /* get MCLK_2 from HREF */
+       mast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+       /* determine active crtcs */
+       for (i = 0; i < 2; i++) {
+               if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
+                       crtc_mask |= (1 << i);
+       }
+
+       /* build the ucode which will reclock the memory for us */
+       hwsq_init(hwsq);
+       if (crtc_mask) {
+               hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+               hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
        }
+       if (dev_priv->chipset >= 0x92)
+               hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+       hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+       hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
+
+       /* prepare memory controller */
+       hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+       hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
+       hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
+       hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
 
-       P = (reg0 & 0x00070000) >> 16;
-       N = (reg1 & 0x0000ff00) >> 8;
-       M = (reg1 & 0x000000ff);
+       /* reclock memory */
+       hwsq_wr32(hwsq, 0xc040, mast);
+       hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
+       hwsq_wr32(hwsq, 0x400c, coef);
+       hwsq_wr32(hwsq, 0x4008, ctrl);
 
-       return ((pll.refclk * N / M) >> P);
+       /* restart memory controller */
+       hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+       hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
+       hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
+       hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
+
+       hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+       hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+       hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
+       if (dev_priv->chipset >= 0x92)
+               hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+       hwsq_fini(hwsq);
+       return 0;
 }
 
 void *
-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                 u32 id, int khz)
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct nv50_pm_state *state;
-       int dummy, ret;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nv50_pm_state *info;
+       struct pll_lims pll;
+       int ret = -EINVAL;
+       int N, M, P1, P2;
+       u32 clk, out;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
+       if (dev_priv->chipset == 0xaa ||
+           dev_priv->chipset == 0xac)
+               return ERR_PTR(-ENODEV);
+
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
                return ERR_PTR(-ENOMEM);
-       state->type = id;
-       state->perflvl = perflvl;
 
-       ret = get_pll_limits(dev, id, &state->pll);
-       if (ret < 0) {
-               kfree(state);
-               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+       /* core: for the moment at least, always use nvpll */
+       clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+       if (clk == 0)
+               goto error;
+
+       info->emast = 0x00000003;
+       info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+       info->ncoef = (N << 8) | M;
+
+       /* shader: tie to nvclk if possible, otherwise use spll.  have to be
+        * very careful that the shader clock is at least twice the core, or
+        * some chipsets will be very unhappy.  i expect most or all of these
+        * cases will be handled by tying to nvclk, but it's possible there's
+        * corners
+        */
+       if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+               info->emast |= 0x00000020;
+               info->sctrl  = 0x00000000 | (P1 << 19) | (P1 << 16);
+               info->scoef  = nv_rd32(dev, 0x004024);
+       } else {
+               clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+               if (clk == 0)
+                       goto error;
+
+               info->emast |= 0x00000030;
+               info->sctrl  = 0x80000000 | (P1 << 19) | (P1 << 16);
+               info->scoef  = (N << 8) | M;
+       }
+
+       /* memory: build hwsq ucode which we'll use to reclock memory */
+       info->mclk_hwsq.len = 0;
+       if (perflvl->memory) {
+               clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
+               if (clk < 0) {
+                       ret = clk;
+                       goto error;
+               }
+
+               info->mscript = perflvl->memscript;
+       }
+
+       /* vdec: avoid modifying xpll until we know exactly how the other
+        * clock domains work, i suspect at least some of them can also be
+        * tied to xpll...
+        */
+       info->amast = nv_rd32(dev, 0x00c040);
+       info->pdivs = read_div(dev);
+       if (perflvl->vdec) {
+               /* see how close we can get using nvclk as a source */
+               clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+               /* see how close we can get using xpll/hclk as a source */
+               if (dev_priv->chipset != 0x98)
+                       out = read_pll(dev, 0x004030);
+               else
+                       out = read_clk(dev, clk_src_hclkm3d2);
+               out = calc_div(out, perflvl->vdec, &P2);
+
+               /* select whichever gets us closest */
+               info->amast &= ~0x00000c00;
+               info->pdivs &= ~0x00000700;
+               if (abs((int)perflvl->vdec - clk) <=
+                   abs((int)perflvl->vdec - out)) {
+                       if (dev_priv->chipset != 0x98)
+                               info->amast |= 0x00000c00;
+                       info->pdivs |= P1 << 8;
+               } else {
+                       info->amast |= 0x00000800;
+                       info->pdivs |= P2 << 8;
+               }
+       }
+
+       /* dom6: nfi what this is, but we're limited to various combinations
+        * of the host clock frequency
+        */
+       if (perflvl->dom6) {
+               info->amast &= ~0x0c000000;
+               if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+                       info->amast |= 0x00000000;
+               } else
+               if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+                       info->amast |= 0x08000000;
+               } else {
+                       clk = read_clk(dev, clk_src_hclk) * 3;
+                       clk = calc_div(clk, perflvl->dom6, &P1);
+
+                       info->amast |= 0x0c000000;
+                       info->pdivs  = (info->pdivs & ~0x00000007) | P1;
+               }
        }
 
-       ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
-                           &dummy, &dummy, &state->P);
-       if (ret < 0) {
-               kfree(state);
-               return ERR_PTR(ret);
+       return info;
+error:
+       kfree(info);
+       return ERR_PTR(ret);
+}
+
+static int
+prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 hwsq_data, hwsq_kick;
+       int i;
+
+       if (dev_priv->chipset < 0x90) {
+               hwsq_data = 0x001400;
+               hwsq_kick = 0x00000003;
+       } else {
+               hwsq_data = 0x080000;
+               hwsq_kick = 0x00000001;
        }
 
-       return state;
+       /* upload hwsq ucode */
+       nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
+       nv_wr32(dev, 0x001304, 0x00000000);
+       for (i = 0; i < hwsq->len / 4; i++)
+               nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+       nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+
+       /* launch, and wait for completion */
+       nv_wr32(dev, 0x00130c, hwsq_kick);
+       if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
+               NV_ERROR(dev, "hwsq ucode exec timed out\n");
+               NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+               for (i = 0; i < hwsq->len / 4; i++) {
+                       NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+                                nv_rd32(dev, 0x001400 + (i * 4)));
+               }
+
+               return -EIO;
+       }
+
+       return 0;
 }
 
-void
-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
 {
-       struct nv50_pm_state *state = pre_state;
-       struct nouveau_pm_level *perflvl = state->perflvl;
-       u32 reg = state->pll.reg, tmp;
-       struct bit_entry BIT_M;
-       u16 script;
-       int N = state->N;
-       int M = state->M;
-       int P = state->P;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nv50_pm_state *info = data;
+       struct bit_entry M;
+       int ret = 0;
 
-       if (state->type == PLL_MEMORY && perflvl->memscript &&
-           bit_table(dev, 'M', &BIT_M) == 0 &&
-           BIT_M.version == 1 && BIT_M.length >= 0x0b) {
-               script = ROM16(BIT_M.data[0x05]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
-               script = ROM16(BIT_M.data[0x07]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
-               script = ROM16(BIT_M.data[0x09]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
+       /* halt and idle execution engines */
+       nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+       if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+               goto error;
 
-               nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
+       /* memory: it is *very* important we change this first, the ucode
+        * we build in pre() now has hardcoded 0xc040 values, which can't
+        * change before we execute it or the engine clocks may end up
+        * messed up.
+        */
+       if (info->mclk_hwsq.len) {
+               /* execute some scripts that do ??? from the vbios.. */
+               if (!bit_table(dev, 'M', &M) && M.version == 1) {
+                       if (M.length >= 6)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+                       if (M.length >= 8)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+                       if (M.length >= 10)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+                       nouveau_bios_init_exec(dev, info->mscript);
+               }
+
+               ret = prog_mclk(dev, &info->mclk_hwsq);
+               if (ret)
+                       goto resume;
        }
 
-       if (state->type == PLL_MEMORY) {
-               nv_wr32(dev, 0x100210, 0);
-               nv_wr32(dev, 0x1002dc, 1);
+       /* reclock vdec/dom6 */
+       nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
+       switch (dev_priv->chipset) {
+       case 0x92:
+       case 0x94:
+       case 0x96:
+               nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
+               break;
+       default:
+               nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
+               break;
        }
+       nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
 
-       tmp  = nv_rd32(dev, reg + 0) & 0xfff8ffff;
-       tmp |= 0x80000000 | (P << 16);
-       nv_wr32(dev, reg + 0, tmp);
-       nv_wr32(dev, reg + 4, (N << 8) | M);
+       /* core/shader: make sure sclk/nvclk are disconnected from their
+        * plls (nvclk to dom6, sclk to hclk), modify the plls, and
+        * reconnect sclk/nvclk to their new clock source
+        */
+       if (dev_priv->chipset < 0x92)
+               nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
+       else
+               nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
+       nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
+       nv_wr32(dev, 0x004024, info->scoef);
+       nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
+       nv_wr32(dev, 0x00402c, info->ncoef);
+       nv_mask(dev, 0x00c040, 0x00100033, info->emast);
+
+       goto resume;
+error:
+       ret = -EBUSY;
+resume:
+       nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+       kfree(info);
+       return ret;
+}
 
-       if (state->type == PLL_MEMORY) {
-               nv_wr32(dev, 0x1002dc, 0);
-               nv_wr32(dev, 0x100210, 0x80000000);
+static int
+pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
+{
+       if (*line == 0x04) {
+               *ctrl = 0x00e100;
+               *line = 4;
+               *indx = 0;
+       } else
+       if (*line == 0x09) {
+               *ctrl = 0x00e100;
+               *line = 9;
+               *indx = 1;
+       } else
+       if (*line == 0x10) {
+               *ctrl = 0x00e28c;
+               *line = 0;
+               *indx = 0;
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+int
+nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+       int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+       if (ret)
+               return ret;
+
+       if (nv_rd32(dev, ctrl) & (1 << line)) {
+               *divs = nv_rd32(dev, 0x00e114 + (id * 8));
+               *duty = nv_rd32(dev, 0x00e118 + (id * 8));
+               return 0;
        }
 
-       kfree(state);
+       return -EINVAL;
 }
 
+int
+nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+       int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+       if (ret)
+               return ret;
+
+       nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
+       nv_wr32(dev, 0x00e114 + (id * 8), divs);
+       nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
+       return 0;
+}
index 2633aa8554ebfc06b361e39fc0b3afd71af1ce91..c4423ba9c9bf67cab15cb7c6ac8bfa2acd5d909d 100644 (file)
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
        BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
        OUT_RING  (evo, 0);
 
+       nouveau_hdmi_mode_set(encoder, NULL);
+
        nv_encoder->crtc = NULL;
        nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 }
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 static void
 nv50_sor_prepare(struct drm_encoder *encoder)
 {
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       nv50_sor_disconnect(encoder);
+       if (nv_encoder->dcb->type == OUTPUT_DP) {
+               /* avoid race between link training and supervisor intr */
+               nv50_display_sync(encoder->dev);
+       }
 }
 
 static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
 }
 
 static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+                 struct drm_display_mode *mode)
 {
        struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
                     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+       nv_encoder->crtc = encoder->crtc;
 
        switch (nv_encoder->dcb->type) {
        case OUTPUT_TMDS:
                if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (adjusted_mode->clock < 165000)
+                       if (mode->clock < 165000)
                                mode_ctl = 0x0100;
                        else
                                mode_ctl = 0x0500;
                } else
                        mode_ctl = 0x0200;
+
+               nouveau_hdmi_mode_set(encoder, mode);
                break;
        case OUTPUT_DP:
                nv_connector = nouveau_encoder_connector_get(nv_encoder);
                if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
                        mode_ctl |= 0x00020000;
                } else {
-                       nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
                        mode_ctl |= 0x00050000;
                }
 
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        else
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
 
        nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        ret = RING_SPACE(evo, 2);
        if (ret) {
                NV_ERROR(dev, "no space while connecting SOR\n");
+               nv_encoder->crtc = NULL;
                return;
        }
        BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
        OUT_RING(evo, mode_ctl);
-
-       nv_encoder->crtc = encoder->crtc;
 }
 
 static struct drm_crtc *
index 40b84f22d819cd8f95e542429395d02120708980..6f38ceae3aa41d987a2a884afe3b3128dc1c5bb1 100644 (file)
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
                        phys |= 0x60;
                else if (coverage <= 64 * 1024 * 1024)
                        phys |= 0x40;
-               else if (coverage < 128 * 1024 * 1024)
+               else if (coverage <= 128 * 1024 * 1024)
                        phys |= 0x20;
        }
 
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644 (file)
index 0000000..7487573
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
+ *     more than just an enable/disable stub this needs to be split out to
+ *     nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+       return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+       return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, BSP);
+
+       kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+       struct nv84_bsp_engine *pbsp;
+
+       pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+       if (!pbsp)
+               return -ENOMEM;
+
+       pbsp->base.destroy = nv84_bsp_destroy;
+       pbsp->base.init = nv84_bsp_init;
+       pbsp->base.fini = nv84_bsp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644 (file)
index 0000000..6570d30
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
+ *     more than just an enable/disable stub this needs to be split out to
+ *     nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+       return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+       return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, VP);
+
+       kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+       struct nv84_vp_engine *pvp;
+
+       pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+       if (!pvp)
+               return -ENOMEM;
+
+       pvp->base.destroy = nv84_vp_destroy;
+       pvp->base.init = nv84_vp_init;
+       pvp->base.fini = nv84_vp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644 (file)
index 0000000..db94ff0
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_crypt_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00004000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+       return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+       return 0;
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+       struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+       kfree(pcrypt);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+       struct nv98_crypt_engine *pcrypt;
+
+       pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+       if (!pcrypt)
+               return -ENOMEM;
+
+       pcrypt->base.destroy = nv98_crypt_destroy;
+       pcrypt->base.init = nv98_crypt_init;
+       pcrypt->base.fini = nv98_crypt_fini;
+
+       NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644 (file)
index 0000000..a987dd6
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_ppp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+       return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+       return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, PPP);
+
+       kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+       struct nv98_ppp_engine *pppp;
+
+       pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+       if (!pppp)
+               return -ENOMEM;
+
+       pppp->base.destroy = nv98_ppp_destroy;
+       pppp->base.init = nv98_ppp_init;
+       pppp->base.fini = nv98_ppp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+       return 0;
+}
index eaf35f8321eede807fedf760fc54c21a0b8e7978..abc36626fef0d2b12e0145616dbc991182a7182e 100644 (file)
@@ -31,8 +31,9 @@
  */
 
 ifdef(`NVA3',
-.section nva3_pcopy_data,
-.section nvc0_pcopy_data
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
 )
 
 ctx_object:                   .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query:                .b32 0
 ctx_dma_src:                  .b32 0
 ctx_dma_dst:                  .b32 0
 ,)
-.equ ctx_dma_count 3
+.equ #ctx_dma_count 3
 ctx_query_address_high:       .b32 0
 ctx_query_address_low:        .b32 0
 ctx_query_counter:            .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt:                     .b32 0
 dispatch_table:
 // mthd 0x0000, NAME
 .b16 0x000 1
-.b32 ctx_object                     ~0xffffffff
+.b32 #ctx_object                     ~0xffffffff
 // mthd 0x0100, NOP
 .b16 0x040 1
-.b32 0x00010000 + cmd_nop           ~0xffffffff
+.b32 0x00010000 + #cmd_nop           ~0xffffffff
 // mthd 0x0140, PM_TRIGGER
 .b16 0x050 1
-.b32 0x00010000 + cmd_pm_trigger    ~0xffffffff
+.b32 0x00010000 + #cmd_pm_trigger    ~0xffffffff
 ifdef(`NVA3', `
 // mthd 0x0180-0x018c, DMA_
-.b16 0x060 ctx_dma_count
+.b16 0x060 #ctx_dma_count
 dispatch_dma:
-.b32 0x00010000 + cmd_dma           ~0xffffffff
-.b32 0x00010000 + cmd_dma           ~0xffffffff
-.b32 0x00010000 + cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
 ',)
 // mthd 0x0200-0x0218, SRC_TILE
 .b16 0x80 7
-.b32 ctx_src_tile_mode              ~0x00000fff
-.b32 ctx_src_xsize                  ~0x0007ffff
-.b32 ctx_src_ysize                  ~0x00001fff
-.b32 ctx_src_zsize                  ~0x000007ff
-.b32 ctx_src_zoff                   ~0x00000fff
-.b32 ctx_src_xoff                   ~0x0007ffff
-.b32 ctx_src_yoff                   ~0x00001fff
+.b32 #ctx_src_tile_mode              ~0x00000fff
+.b32 #ctx_src_xsize                  ~0x0007ffff
+.b32 #ctx_src_ysize                  ~0x00001fff
+.b32 #ctx_src_zsize                  ~0x000007ff
+.b32 #ctx_src_zoff                   ~0x00000fff
+.b32 #ctx_src_xoff                   ~0x0007ffff
+.b32 #ctx_src_yoff                   ~0x00001fff
 // mthd 0x0220-0x0238, DST_TILE
 .b16 0x88 7
-.b32 ctx_dst_tile_mode              ~0x00000fff
-.b32 ctx_dst_xsize                  ~0x0007ffff
-.b32 ctx_dst_ysize                  ~0x00001fff
-.b32 ctx_dst_zsize                  ~0x000007ff
-.b32 ctx_dst_zoff                   ~0x00000fff
-.b32 ctx_dst_xoff                   ~0x0007ffff
-.b32 ctx_dst_yoff                   ~0x00001fff
+.b32 #ctx_dst_tile_mode              ~0x00000fff
+.b32 #ctx_dst_xsize                  ~0x0007ffff
+.b32 #ctx_dst_ysize                  ~0x00001fff
+.b32 #ctx_dst_zsize                  ~0x000007ff
+.b32 #ctx_dst_zoff                   ~0x00000fff
+.b32 #ctx_dst_xoff                   ~0x0007ffff
+.b32 #ctx_dst_yoff                   ~0x00001fff
 // mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
 .b16 0xc0 2
-.b32 0x00010000 + cmd_exec          ~0xffffffff
-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + #cmd_exec          ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
 // mthd 0x030c-0x0340, various stuff
 .b16 0xc3 14
-.b32 ctx_src_address_high           ~0x000000ff
-.b32 ctx_src_address_low            ~0xfffffff0
-.b32 ctx_dst_address_high           ~0x000000ff
-.b32 ctx_dst_address_low            ~0xfffffff0
-.b32 ctx_src_pitch                  ~0x0007ffff
-.b32 ctx_dst_pitch                  ~0x0007ffff
-.b32 ctx_xcnt                       ~0x0000ffff
-.b32 ctx_ycnt                       ~0x00001fff
-.b32 ctx_format                     ~0x0333ffff
-.b32 ctx_swz_const0                 ~0xffffffff
-.b32 ctx_swz_const1                 ~0xffffffff
-.b32 ctx_query_address_high         ~0x000000ff
-.b32 ctx_query_address_low          ~0xffffffff
-.b32 ctx_query_counter              ~0xffffffff
+.b32 #ctx_src_address_high           ~0x000000ff
+.b32 #ctx_src_address_low            ~0xfffffff0
+.b32 #ctx_dst_address_high           ~0x000000ff
+.b32 #ctx_dst_address_low            ~0xfffffff0
+.b32 #ctx_src_pitch                  ~0x0007ffff
+.b32 #ctx_dst_pitch                  ~0x0007ffff
+.b32 #ctx_xcnt                       ~0x0000ffff
+.b32 #ctx_ycnt                       ~0x00001fff
+.b32 #ctx_format                     ~0x0333ffff
+.b32 #ctx_swz_const0                 ~0xffffffff
+.b32 #ctx_swz_const1                 ~0xffffffff
+.b32 #ctx_query_address_high         ~0x000000ff
+.b32 #ctx_query_address_low          ~0xffffffff
+.b32 #ctx_query_counter              ~0xffffffff
 .b16 0x800 0
 
 ifdef(`NVA3',
-.section nva3_pcopy_code,
-.section nvc0_pcopy_code
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
 )
 
 main:
@@ -143,12 +145,12 @@ main:
    mov $sp $r0
 
    // setup i0 handler and route fifo and ctxswitch to it
-   mov $r1 ih
+   mov $r1 #ih
    mov $iv0 $r1
    mov $r1 0x400
    movw $r2 0xfff3
    sethi $r2 0
-   iowr I[$r2 + 0x300] $r2
+   iowr I[$r1 + 0x300] $r2
 
    // enable interrupts
    or $r2 0xc
@@ -164,19 +166,19 @@ main:
    bset $flags $p0
    spin:
       sleep $p0
-      bra spin
+      bra #spin
 
 // i0 handler
 ih:
    iord $r1 I[$r0 + 0x200]
 
    and $r2 $r1 0x00000008
-   bra e ih_no_chsw
-      call chsw
+   bra e #ih_no_chsw
+      call #chsw
    ih_no_chsw:
    and $r2 $r1 0x00000004
-   bra e ih_no_cmd
-      call dispatch
+   bra e #ih_no_cmd
+      call #dispatch
 
    ih_no_cmd:
    and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
    sethi $r4 0x60000
 
    // swap!
-   bra $p1 swctx_load
+   bra $p1 #swctx_load
       xdst $r0 $r4
-      bra swctx_done
+      bra #swctx_done
    swctx_load:
       xdld $r0 $r4
    swctx_done:
@@ -251,9 +253,9 @@ chsw:
 
    // if it's active, unload it and return
    xbit $r15 $r3 0x1e
-   bra e chsw_no_unload
+   bra e #chsw_no_unload
       bclr $flags $p1
-      call swctx
+      call #swctx
       bclr $r3 0x1e
       iowr I[$r2] $r3
       mov $r4 1
@@ -266,20 +268,20 @@ chsw:
 
    // is there a channel waiting to be loaded?
    xbit $r13 $r3 0x1e
-   bra e chsw_finish_load
+   bra e #chsw_finish_load
       bset $flags $p1
-      call swctx
+      call #swctx
 ifdef(`NVA3',
       // load dma objects back into TARGET regs
-      mov $r5 ctx_dma
-      mov $r6 ctx_dma_count
+      mov $r5 #ctx_dma
+      mov $r6 #ctx_dma_count
       chsw_load_ctx_dma:
          ld b32 $r7 D[$r5 + $r6 * 4]
          add b32 $r8 $r6 0x180
          shl b32 $r8 8
          iowr I[$r8] $r7
          sub b32 $r6 1
-         bra nc chsw_load_ctx_dma
+         bra nc #chsw_load_ctx_dma
 ,)
 
    chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
    shl b32 $r2 0x10
 
    // lookup method in the dispatch table, ILLEGAL_MTHD if not found
-   mov $r5 dispatch_table
+   mov $r5 #dispatch_table
    clear b32 $r6
    clear b32 $r7
    dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
       ld b16 $r7 D[$r5 + 2]
       add b32 $r5 4
       cmpu b32 $r4 $r6
-      bra c dispatch_illegal_mthd
+      bra c #dispatch_illegal_mthd
       add b32 $r7 $r6
       cmpu b32 $r4 $r7
-      bra c dispatch_valid_mthd
+      bra c #dispatch_valid_mthd
       sub b32 $r7 $r6
       shl b32 $r7 3
       add b32 $r5 $r7
-      bra dispatch_loop
+      bra #dispatch_loop
 
    // ensure no bits set in reserved fields, INVALID_BITFIELD
    dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
    ld b32 $r5 D[$r4 + 4]
    and $r5 $r3
    cmpu b32 $r5 0
-   bra ne dispatch_invalid_bitfield
+   bra ne #dispatch_invalid_bitfield
 
    // depending on dispatch flags: execute method, or save data as state
    ld b16 $r5 D[$r4 + 0]
    ld b16 $r6 D[$r4 + 2]
    cmpu b32 $r6 0
-   bra ne dispatch_cmd
+   bra ne #dispatch_cmd
       st b32 D[$r5] $r3
-      bra dispatch_done
+      bra #dispatch_done
    dispatch_cmd:
       bclr $flags $p1
       call $r5
-      bra $p1 dispatch_error
-      bra dispatch_done
+      bra $p1 #dispatch_error
+      bra #dispatch_done
 
    dispatch_invalid_bitfield:
    or $r2 2
@@ -353,7 +355,7 @@ dispatch:
       iord $r2 I[$r0 + 0x200]
       and $r2 0x40
       cmpu b32 $r2 0
-      bra ne hostirq_wait
+      bra ne #hostirq_wait
 
    dispatch_done:
    mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
 //       $r2: hostirq state
 //       $r3: data
 cmd_dma:
-   sub b32 $r4 dispatch_dma
+   sub b32 $r4 #dispatch_dma
    shr b32 $r4 1
    bset $r3 0x1e
-   st b32 D[$r4 + ctx_dma] $r3
+   st b32 D[$r4 + #ctx_dma] $r3
    add b32 $r4 0x600
    shl b32 $r4 6
    iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
    st b32 D[$sp + 0x0c] $r0
 
    // extract cpp, src_ncomp and dst_ncomp from FORMAT
-   ld b32 $r4 D[$r0 + ctx_format]
+   ld b32 $r4 D[$r0 + #ctx_format]
    extr $r5 $r4 16:17
    add b32 $r5 1
    extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
       clear b32 $r11
       bpc_loop:
          cmpu b8 $r10 4
-         bra nc cmp_c0
+         bra nc #cmp_c0
             mulu $r12 $r10 $r5
             add b32 $r12 $r11
             bset $flags $p2
-            bra bpc_next
+            bra #bpc_next
          cmp_c0:
-         bra ne cmp_c1
+         bra ne #cmp_c1
             mov $r12 0x10
             add b32 $r12 $r11
-            bra bpc_next
+            bra #bpc_next
          cmp_c1:
          cmpu b8 $r10 6
-         bra nc cmp_zero
+         bra nc #cmp_zero
             mov $r12 0x14
             add b32 $r12 $r11
-            bra bpc_next
+            bra #bpc_next
          cmp_zero:
             mov $r12 0x80
          bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
          add b32 $r8 1
          add b32 $r11 1
          cmpu b32 $r11 $r5
-         bra c bpc_loop
+         bra c #bpc_loop
       add b32 $r9 1
       cmpu b32 $r9 $r7
-      bra c ncomp_loop
+      bra c #ncomp_loop
 
    // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
    mulu $r6 $r5
-   st b32 D[$r0 + ctx_src_cpp] $r6
-   ld b32 $r8 D[$r0 + ctx_xcnt]
+   st b32 D[$r0 + #ctx_src_cpp] $r6
+   ld b32 $r8 D[$r0 + #ctx_xcnt]
    mulu $r6 $r8
-   bra $p2 dst_xcnt
+   bra $p2 #dst_xcnt
    clear b32 $r6
 
    dst_xcnt:
    mulu $r7 $r5
-   st b32 D[$r0 + ctx_dst_cpp] $r7
+   st b32 D[$r0 + #ctx_dst_cpp] $r7
    mulu $r7 $r8
 
    mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
    iowr I[$r5 + 0x000] $r6
    iowr I[$r5 + 0x100] $r7
    add b32 $r5 0x800
-   ld b32 $r6 D[$r0 + ctx_dst_cpp]
+   ld b32 $r6 D[$r0 + #ctx_dst_cpp]
    sub b32 $r6 1
    shl b32 $r6 8
-   ld b32 $r7 D[$r0 + ctx_src_cpp]
+   ld b32 $r7 D[$r0 + #ctx_src_cpp]
    sub b32 $r7 1
    or $r6 $r7
    iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
    ld b32 $r6 D[$sp + 0x0c]
    iowr I[$r5 + 0x300] $r6
    add b32 $r5 0x400
-   ld b32 $r6 D[$r0 + ctx_swz_const0]
+   ld b32 $r6 D[$r0 + #ctx_swz_const0]
    iowr I[$r5 + 0x000] $r6
-   ld b32 $r6 D[$r0 + ctx_swz_const1]
+   ld b32 $r6 D[$r0 + #ctx_swz_const1]
    iowr I[$r5 + 0x100] $r6
    add $sp 0x10
    ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
 //
 cmd_exec_set_surface_tiled:
    // translate TILE_MODE into Tp, Th, Td shift values
-   ld b32 $r7 D[$r5 + ctx_src_tile_mode]
+   ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
    extr $r9 $r7 8:11
    extr $r8 $r7 4:7
 ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
 )
    extr $r7 $r7 0:3
    cmp b32 $r7 0xe
-   bra ne xtile64
+   bra ne #xtile64
    mov $r7 4
-   bra xtileok
+   bra #xtileok
    xtile64:
    xbit $r7 $flags $p2
    add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
 
    // Op = (x * cpp) & ((1 << Tp) - 1)
    // Tx = (x * cpp) >> Tp
-   ld b32 $r10 D[$r5 + ctx_src_xoff]
-   ld b32 $r11 D[$r5 + ctx_src_cpp]
+   ld b32 $r10 D[$r5 + #ctx_src_xoff]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
    mulu $r10 $r11
    mov $r11 1
    shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
 
    // Tyo = y & ((1 << Th) - 1)
    // Ty  = y >> Th
-   ld b32 $r13 D[$r5 + ctx_src_yoff]
+   ld b32 $r13 D[$r5 + #ctx_src_yoff]
    mov $r14 1
    shl b32 $r14 $r8
    sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
    add b32 $r12 $r11
 
    // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
-   ld b32 $r15 D[$r5 + ctx_src_xsize]
-   ld b32 $r11 D[$r5 + ctx_src_cpp]
+   ld b32 $r15 D[$r5 + #ctx_src_xsize]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
    mulu $r15 $r11
    mov $r11 1
    shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
    push $r15
 
    // nTy = (h + ((1 << Th) - 1)) >> Th
-   ld b32 $r15 D[$r5 + ctx_src_ysize]
+   ld b32 $r15 D[$r5 + #ctx_src_ysize]
    mov $r11 1
    shl b32 $r11 $r8
    sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
    // Tz  = z >> Td
    // Op += Tzo << Tys
    // Ts  = Tys + Td
-   ld b32 $r8 D[$r5 + ctx_src_zoff]
+   ld b32 $r8 D[$r5 + #ctx_src_zoff]
    mov $r14 1
    shl b32 $r14 $r9
    sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
 
    // SRC_ADDRESS_LOW   = (Ot + Op) & 0xffffffff
    // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
-   ld b32 $r7 D[$r5 + ctx_src_address_low]
-   ld b32 $r8 D[$r5 + ctx_src_address_high]
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
+   ld b32 $r8 D[$r5 + #ctx_src_address_high]
    add b32 $r10 $r12
    add b32 $r7 $r10
    adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
    xbit $r6 $flags $p2
    add b32 $r6 0x202
    shl b32 $r6 8
-   ld b32 $r7 D[$r5 + ctx_src_address_low]
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
    iowr I[$r6 + 0x000] $r7
    add b32 $r6 0x400
-   ld b32 $r7 D[$r5 + ctx_src_address_high]
+   ld b32 $r7 D[$r5 + #ctx_src_address_high]
    shl b32 $r7 16
    iowr I[$r6 + 0x000] $r7
    add b32 $r6 0x400
-   ld b32 $r7 D[$r5 + ctx_src_pitch]
+   ld b32 $r7 D[$r5 + #ctx_src_pitch]
    iowr I[$r6 + 0x000] $r7
    ret
 
@@ -697,7 +699,7 @@ cmd_exec_wait:
    loop:
       iord $r1 I[$r0]
       and $r1 1
-      bra ne loop
+      bra ne #loop
    pop $r1
    pop $r0
    ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
 cmd_exec_query:
    // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
    xbit $r4 $r3 13
-   bra ne query_counter
-      call cmd_exec_wait
+   bra ne #query_counter
+      call #cmd_exec_wait
       mov $r4 0x80c
       shl b32 $r4 6
-      ld b32 $r5 D[$r0 + ctx_query_address_low]
+      ld b32 $r5 D[$r0 + #ctx_query_address_low]
       add b32 $r5 4
       iowr I[$r4 + 0x000] $r5
       iowr I[$r4 + 0x100] $r0
       mov $r5 0xc
       iowr I[$r4 + 0x200] $r5
       add b32 $r4 0x400
-      ld b32 $r5 D[$r0 + ctx_query_address_high]
+      ld b32 $r5 D[$r0 + #ctx_query_address_high]
       shl b32 $r5 16
       iowr I[$r4 + 0x000] $r5
       add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
 
    // write COUNTER
    query_counter:
-   call cmd_exec_wait
+   call #cmd_exec_wait
    mov $r4 0x80c
    shl b32 $r4 6
-   ld b32 $r5 D[$r0 + ctx_query_address_low]
+   ld b32 $r5 D[$r0 + #ctx_query_address_low]
    iowr I[$r4 + 0x000] $r5
    iowr I[$r4 + 0x100] $r0
    mov $r5 0x4
    iowr I[$r4 + 0x200] $r5
    add b32 $r4 0x400
-   ld b32 $r5 D[$r0 + ctx_query_address_high]
+   ld b32 $r5 D[$r0 + #ctx_query_address_high]
    shl b32 $r5 16
    iowr I[$r4 + 0x000] $r5
    add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
    mov $r5 0x00001110
    sethi $r5 0x13120000
    iowr I[$r4 + 0x100] $r5
-   ld b32 $r5 D[$r0 + ctx_query_counter]
+   ld b32 $r5 D[$r0 + #ctx_query_counter]
    add b32 $r4 0x500
    iowr I[$r4 + 0x000] $r5
    mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
 //       $r2: hostirq state
 //       $r3: data
 cmd_exec:
-   call cmd_exec_wait
+   call #cmd_exec_wait
 
    // if format requested, call function to calculate it, otherwise
    // fill in cpp/xcnt for both surfaces as if (cpp == 1)
    xbit $r15 $r3 0
-   bra e cmd_exec_no_format
-      call cmd_exec_set_format
+   bra e #cmd_exec_no_format
+      call #cmd_exec_set_format
       mov $r4 0x200
-      bra cmd_exec_init_src_surface
+      bra #cmd_exec_init_src_surface
    cmd_exec_no_format:
       mov $r6 0x810
       shl b32 $r6 6
       mov $r7 1
-      st b32 D[$r0 + ctx_src_cpp] $r7
-      st b32 D[$r0 + ctx_dst_cpp] $r7
-      ld b32 $r7 D[$r0 + ctx_xcnt]
+      st b32 D[$r0 + #ctx_src_cpp] $r7
+      st b32 D[$r0 + #ctx_dst_cpp] $r7
+      ld b32 $r7 D[$r0 + #ctx_xcnt]
       iowr I[$r6 + 0x000] $r7
       iowr I[$r6 + 0x100] $r7
       clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
    bclr $flags $p2
    clear b32 $r5
    xbit $r15 $r3 4
-   bra e src_tiled
-      call cmd_exec_set_surface_linear
-      bra cmd_exec_init_dst_surface
+   bra e #src_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_init_dst_surface
    src_tiled:
-      call cmd_exec_set_surface_tiled
+      call #cmd_exec_set_surface_tiled
       bset $r4 7
 
    cmd_exec_init_dst_surface:
    bset $flags $p2
-   mov $r5 ctx_dst_address_high - ctx_src_address_high
+   mov $r5 #ctx_dst_address_high - #ctx_src_address_high
    xbit $r15 $r3 8
-   bra e dst_tiled
-      call cmd_exec_set_surface_linear
-      bra cmd_exec_kick
+   bra e #dst_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_kick
    dst_tiled:
-      call cmd_exec_set_surface_tiled
+      call #cmd_exec_set_surface_tiled
       bset $r4 8
 
    cmd_exec_kick:
    mov $r5 0x800
    shl b32 $r5 6
-   ld b32 $r6 D[$r0 + ctx_ycnt]
+   ld b32 $r6 D[$r0 + #ctx_ycnt]
    iowr I[$r5 + 0x100] $r6
    mov $r6 0x0041
    // SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
 
    // if requested, queue up a QUERY write after the copy has completed
    xbit $r15 $r3 12
-   bra e cmd_exec_done
-      call cmd_exec_query
+   bra e #cmd_exec_done
+      call #cmd_exec_query
 
    cmd_exec_done:
    ret
index 2731de22ebe92c7b911fe960a276892134cb152a..1f33fbdc00be5801385ac51cb3a592f23a3e180b 100644 (file)
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
        0xf10010fe,
        0xf1040017,
        0xf0fff327,
-       0x22d00023,
+       0x12d00023,
        0x0c25f0c0,
        0xf40012d0,
        0x17f11031,
index 618c144b7a30e23fcd41e0762cbdaa4d48febfcc..9e636e6ef6d753c52be9f79de0260f23c4f343d1 100644 (file)
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
        return false;
 }
 
-void
+int
 nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nva3_pm_state *info = pre_state;
        unsigned long flags;
+       int ret = -EAGAIN;
 
        /* prevent any new grctx switches from starting */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
                nv_wr32(dev, 0x100210, 0x80000000);
        }
 
+       ret = 0;
+
 cleanup:
        /* unfreeze PFIFO */
        nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
                nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        kfree(info);
+       return ret;
 }
index 419903880e9d36f5627b731a2ec046b5f788d1cc..a8d17458ced1db88353cd1e0d4756d9b80535f3f 100644 (file)
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
        0xf10010fe,
        0xf1040017,
        0xf0fff327,
-       0x22d00023,
+       0x12d00023,
        0x0c25f0c0,
        0xf40012d0,
        0x17f11031,
index ecfafd70cf0ed2b6f9ee0b51c74031caa337de27..8ee3963f903001a45e5061bfbc81a3efc3663c5f 100644 (file)
@@ -875,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
        case 0xcf: /* 4/0/0/0, 3 */
                priv->magic_not_rop_nr = 0x03;
                break;
+       case 0xd9: /* 1/0/0/0, 1 */
+               priv->magic_not_rop_nr = 0x01;
+               break;
        }
 
        if (!priv->magic_not_rop_nr) {
                NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
                         priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
                         priv->tp_nr[3], priv->rop_nr);
-               /* use 0xc3's values... */
-               priv->magic_not_rop_nr = 0x03;
+               priv->magic_not_rop_nr = 0x00;
        }
 
        NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
index 2a4b6dc8f9dee2317400de1d8dcf5a48db657c6f..e6b228844a32c0e742806d95b06656bd9dc72536 100644 (file)
@@ -71,9 +71,9 @@ queue_put:
        ld b32 $r9 D[$r13 + 0x4]        // PUT
        xor $r8 8
        cmpu b32 $r8 $r9
-       bra ne queue_put_next
+       bra ne #queue_put_next
                mov $r15 E_CMD_OVERFLOW
-               call error
+               call #error
                ret
 
        // store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
        ld b32 $r8 D[$r13 + 0x0]        // GET
        ld b32 $r9 D[$r13 + 0x4]        // PUT
        cmpu b32 $r8 $r9
-       bra e queue_get_done
+       bra e #queue_get_done
                // fetch first cmd/data pair
                and $r9 $r8 7
                shl b32 $r9 3
@@ -135,9 +135,9 @@ nv_rd32:
        nv_rd32_wait:
                iord $r12 I[$r11 + 0x000]
                xbit $r12 $r12 31
-               bra ne nv_rd32_wait
+               bra ne #nv_rd32_wait
        mov $r10 6                      // DONE_MMIO_RD
-       call wait_doneo
+       call #wait_doneo
        iord $r15 I[$r11 + 0x100]       // MMIO_RDVAL
        ret
 
@@ -157,7 +157,7 @@ nv_wr32:
        nv_wr32_wait:
                iord $r12 I[$r11 + 0x000]
                xbit $r12 $r12 31
-               bra ne nv_wr32_wait
+               bra ne #nv_wr32_wait
        ret
 
 // (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
                shl b32 $r8 6
                iord $r8 I[$r8 + 0x000] // DONE
                xbit $r8 $r8 $r10
-               bra $2 wait_done_$1
+               bra $2 #wait_done_$1
        trace_clr(T_WAIT)
        ret
 ')
@@ -216,7 +216,7 @@ mmctx_size:
                add b32 $r9 $r8
                add b32 $r14 4
                cmpu b32 $r14 $r15
-               bra ne nv_mmctx_size_loop
+               bra ne #nv_mmctx_size_loop
        mov b32 $r15 $r9
        ret
 
@@ -238,12 +238,12 @@ mmctx_xfer:
        shl b32 $r8 6
        clear b32 $r9
        or $r11 $r11
-       bra e mmctx_base_disabled
+       bra e #mmctx_base_disabled
                iowr I[$r8 + 0x000] $r11        // MMCTX_BASE
                bset $r9 0                      // BASE_EN
        mmctx_base_disabled:
        or $r14 $r14
-       bra e mmctx_multi_disabled
+       bra e #mmctx_multi_disabled
                iowr I[$r8 + 0x200] $r14        // MMCTX_MULTI_STRIDE
                iowr I[$r8 + 0x300] $r15        // MMCTX_MULTI_MASK
                bset $r9 1                      // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
                mmctx_wait_free:
                        iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
                        and $r14 0x1f
-                       bra e mmctx_wait_free
+                       bra e #mmctx_wait_free
 
                // queue up an entry
                ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
                iowr I[$r8 + 0x300] $r14
                add b32 $r12 4
                cmpu b32 $r12 $r13
-               bra ne mmctx_exec_loop
+               bra ne #mmctx_exec_loop
 
        xbit $r11 $r10 2
-       bra ne mmctx_stop
+       bra ne #mmctx_stop
                // wait for queue to empty
                mmctx_fini_wait:
                        iord $r11 I[$r8 + 0x000]        // MMCTX_CTRL
                        and $r11 0x1f
                        cmpu b32 $r11 0x10
-                       bra ne mmctx_fini_wait
+                       bra ne #mmctx_fini_wait
                mov $r10 2                              // DONE_MMCTX
-               call wait_donez
-               bra mmctx_done
+               call #wait_donez
+               bra #mmctx_done
        mmctx_stop:
                xbit $r11 $r10 0
                shl b32 $r11 16                 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
                        // wait for STOP_TRIGGER to clear
                        iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
                        xbit $r11 $r11 18
-                       bra ne mmctx_stop_wait
+                       bra ne #mmctx_stop_wait
        mmctx_done:
        trace_clr(T_MMCTX)
        ret
@@ -305,7 +305,7 @@ mmctx_xfer:
 strand_wait:
        push $r10
        mov $r10 2
-       call wait_donez
+       call #wait_donez
        pop $r10
        ret
 
@@ -316,7 +316,7 @@ strand_pre:
        sethi $r8 0x20000
        mov $r9 0xc
        iowr I[$r8] $r9
-       call strand_wait
+       call #strand_wait
        ret
 
 // unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
        sethi $r8 0x20000
        mov $r9 0xd
        iowr I[$r8] $r9
-       call strand_wait
+       call #strand_wait
        ret
 
 // Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
        iowr I[$r10 + 0x000] $r12               // 0x93c = 0xf
        mov $r12 0xb
        iowr I[$r11 + 0x000] $r12               // 0x928 = 0xb
-       call strand_wait
+       call #strand_wait
        iowr I[$r10 + 0x000] $r14               // 0x93c = <id>
        mov $r12 0xa
        iowr I[$r11 + 0x000] $r12               // 0x928 = 0xa
-       call strand_wait
+       call #strand_wait
        ret
 
 // Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
 //
 strand_ctx_init:
        trace_set(T_STRINIT)
-       call strand_pre
+       call #strand_pre
        mov $r14 3
-       call strand_set
+       call #strand_set
        mov $r10 0x46fc
        sethi $r10 0x20000
        add b32 $r11 $r10 0x400
        iowr I[$r10 + 0x100] $r0        // STRAND_FIRST_GENE = 0
        mov $r12 1
        iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_FIRST_GENE
-       call strand_wait
+       call #strand_wait
        sub b32 $r12 $r0 1
        iowr I[$r10 + 0x000] $r12       // STRAND_GENE_CNT = 0xffffffff
        mov $r12 2
        iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_GENE_CNT
-       call strand_wait
-       call strand_post
+       call #strand_wait
+       call #strand_post
 
        // read the size of each strand, poke the context offset of
        // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
                add b32 $r14 $r10
                add b32 $r8 4
                sub b32 $r9 1
-               bra ne ctx_init_strand_loop
+               bra ne #ctx_init_strand_loop
 
        shl b32 $r14 8
        sub b32 $r15 $r14 $r15
index 636fe9812f7966e33f66608e724f0f15cb31c00b..91d44ea662d9178d8e0bd4e4ab00b6a8aa8d270b 100644 (file)
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
        case 0xc1:
                return 0x9197;
        case 0xc8:
+       case 0xd9:
                return 0x9297;
        default:
                return 0;
index 96b0b93d94ca8b11466145ddd2c2ce050eee5ffb..de77842b31c09df7313b6c38f6d4c54f91ffa997 100644 (file)
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
 static void
 nvc0_grctx_generate_90c0(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+               nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+       }
        nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
        nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+       for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+               nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+               nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+               nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+               nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+       }
        nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
        nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->chipset != 0xc1) {
-               nv_wr32(dev, 0x405800, 0x078000bf);
-               nv_wr32(dev, 0x405830, 0x02180000);
-       } else {
+       if (dev_priv->chipset == 0xd9) {
                nv_wr32(dev, 0x405800, 0x0f8000bf);
                nv_wr32(dev, 0x405830, 0x02180218);
+               nv_wr32(dev, 0x405834, 0x08000000);
+       } else
+       if (dev_priv->chipset == 0xc1) {
+               nv_wr32(dev, 0x405800, 0x0f8000bf);
+               nv_wr32(dev, 0x405830, 0x02180218);
+               nv_wr32(dev, 0x405834, 0x00000000);
+       } else {
+               nv_wr32(dev, 0x405800, 0x078000bf);
+               nv_wr32(dev, 0x405830, 0x02180000);
+               nv_wr32(dev, 0x405834, 0x00000000);
        }
-       nv_wr32(dev, 0x405834, 0x00000000);
        nv_wr32(dev, 0x405838, 0x00000000);
        nv_wr32(dev, 0x405854, 0x00000000);
        nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
        nv_wr32(dev, 0x4064ac, 0x00003fff);
        nv_wr32(dev, 0x4064b4, 0x00000000);
        nv_wr32(dev, 0x4064b8, 0x00000000);
-       if (dev_priv->chipset == 0xc1) {
+       if (dev_priv->chipset == 0xd9)
+               nv_wr32(dev, 0x4064bc, 0x00000000);
+       if (dev_priv->chipset == 0xc1 ||
+           dev_priv->chipset == 0xd9) {
                nv_wr32(dev, 0x4064c0, 0x80140078);
                nv_wr32(dev, 0x4064c4, 0x0086ffff);
        }
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
        /* ROPC_BROADCAST */
        nv_wr32(dev, 0x408800, 0x02802a3c);
        nv_wr32(dev, 0x408804, 0x00000040);
-       nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
-       nv_wr32(dev, 0x408900, 0x3080b801);
-       nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
-       nv_wr32(dev, 0x408908, 0x00c80929);
+       if (chipset == 0xd9) {
+               nv_wr32(dev, 0x408808, 0x1043e005);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x1043e005);
+               nv_wr32(dev, 0x408908, 0x00c8102f);
+       } else
+       if (chipset == 0xc1) {
+               nv_wr32(dev, 0x408808, 0x1003e005);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x62000001);
+               nv_wr32(dev, 0x408908, 0x00c80929);
+       } else {
+               nv_wr32(dev, 0x408808, 0x0003e00d);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x02000001);
+               nv_wr32(dev, 0x408908, 0x00c80929);
+       }
        nv_wr32(dev, 0x40890c, 0x00000000);
        nv_wr32(dev, 0x408980, 0x0000011d);
 }
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418408, 0x00000000);
        nv_wr32(dev, 0x41840c, 0x00001008);
        nv_wr32(dev, 0x418410, 0x0fff0fff);
-       nv_wr32(dev, 0x418414, 0x00200fff);
+       nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
        nv_wr32(dev, 0x418450, 0x00000000);
        nv_wr32(dev, 0x418454, 0x00000000);
        nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418700, 0x00000002);
        nv_wr32(dev, 0x418704, 0x00000080);
        nv_wr32(dev, 0x418708, 0x00000000);
-       nv_wr32(dev, 0x41870c, 0x07c80000);
+       nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
        nv_wr32(dev, 0x418710, 0x00000000);
-       nv_wr32(dev, 0x418800, 0x0006860a);
+       nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
        nv_wr32(dev, 0x418808, 0x00000000);
        nv_wr32(dev, 0x41880c, 0x00000000);
        nv_wr32(dev, 0x418810, 0x00000000);
        nv_wr32(dev, 0x418828, 0x00008442);
-       nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x418830, 0x10000001);
+       else
+               nv_wr32(dev, 0x418830, 0x00000001);
        nv_wr32(dev, 0x4188d8, 0x00000008);
        nv_wr32(dev, 0x4188e0, 0x01000000);
        nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x4188f0, 0x00000000);
        nv_wr32(dev, 0x4188f4, 0x00000000);
        nv_wr32(dev, 0x4188f8, 0x00000000);
-       nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
+       if (chipset == 0xd9)
+               nv_wr32(dev, 0x4188fc, 0x20100008);
+       else if (chipset == 0xc1)
+               nv_wr32(dev, 0x4188fc, 0x00100018);
+       else
+               nv_wr32(dev, 0x4188fc, 0x00100000);
        nv_wr32(dev, 0x41891c, 0x00ff00ff);
        nv_wr32(dev, 0x418924, 0x00000000);
        nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
                nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
                nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
        }
-       nv_wr32(dev, 0x418b00, 0x00000000);
+       nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
        nv_wr32(dev, 0x418b08, 0x0a418820);
        nv_wr32(dev, 0x418b0c, 0x062080e6);
        nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418c24, 0x00000000);
        nv_wr32(dev, 0x418c28, 0x00000000);
        nv_wr32(dev, 0x418c2c, 0x00000000);
-       if (chipset == 0xc1)
+       if (chipset == 0xc1 || chipset == 0xd9)
                nv_wr32(dev, 0x418c6c, 0x00000001);
        nv_wr32(dev, 0x418c80, 0x20200004);
        nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419818, 0x00000000);
        nv_wr32(dev, 0x41983c, 0x00038bc7);
        nv_wr32(dev, 0x419848, 0x00000000);
-       nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419864, 0x00000129);
+       else
+               nv_wr32(dev, 0x419864, 0x0000012a);
        nv_wr32(dev, 0x419888, 0x00000000);
        nv_wr32(dev, 0x419a00, 0x000001f0);
        nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419a14, 0x00000200);
        nv_wr32(dev, 0x419a1c, 0x00000000);
        nv_wr32(dev, 0x419a20, 0x00000800);
-       if (chipset != 0xc0 && chipset != 0xc8)
+       if (chipset == 0xd9)
+               nv_wr32(dev, 0x00419ac4, 0x0017f440);
+       else if (chipset != 0xc0 && chipset != 0xc8)
                nv_wr32(dev, 0x00419ac4, 0x0007f440);
        nv_wr32(dev, 0x419b00, 0x0a418820);
        nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419b10, 0x0a418820);
        nv_wr32(dev, 0x419b14, 0x000000e6);
        nv_wr32(dev, 0x419bd0, 0x00900103);
-       nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419be0, 0x00400001);
+       else
+               nv_wr32(dev, 0x419be0, 0x00000001);
        nv_wr32(dev, 0x419be4, 0x00000000);
-       nv_wr32(dev, 0x419c00, 0x00000002);
+       nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
        nv_wr32(dev, 0x419c04, 0x00000006);
        nv_wr32(dev, 0x419c08, 0x00000002);
        nv_wr32(dev, 0x419c20, 0x00000000);
-       if (chipset == 0xce || chipset == 0xcf)
+       if (dev_priv->chipset == 0xd9) {
+               nv_wr32(dev, 0x419c24, 0x00084210);
+               nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
                nv_wr32(dev, 0x419cb0, 0x00020048);
-       else
+       } else
+       if (chipset == 0xce || chipset == 0xcf) {
+               nv_wr32(dev, 0x419cb0, 0x00020048);
+       } else {
                nv_wr32(dev, 0x419cb0, 0x00060048);
+       }
        nv_wr32(dev, 0x419ce8, 0x00000000);
        nv_wr32(dev, 0x419cf4, 0x00000183);
-       nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419d20, 0x12180000);
+       else
+               nv_wr32(dev, 0x419d20, 0x02180000);
        nv_wr32(dev, 0x419d24, 0x00001fff);
-       if (chipset == 0xc1)
+       if (chipset == 0xc1 || chipset == 0xd9)
                nv_wr32(dev, 0x419d44, 0x02180218);
        nv_wr32(dev, 0x419e04, 0x00000000);
        nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x00000215, 0x00000040);
        nv_icmd(dev, 0x00000216, 0x00000040);
        nv_icmd(dev, 0x00000217, 0x00000040);
+       if (dev_priv->chipset == 0xd9) {
+               for (i = 0x0400; i <= 0x0417; i++)
+                       nv_icmd(dev, i, 0x00000040);
+       }
        nv_icmd(dev, 0x00000218, 0x0000c080);
        nv_icmd(dev, 0x00000219, 0x0000c080);
        nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x0000021d, 0x0000c080);
        nv_icmd(dev, 0x0000021e, 0x0000c080);
        nv_icmd(dev, 0x0000021f, 0x0000c080);
+       if (dev_priv->chipset == 0xd9) {
+               for (i = 0x0440; i <= 0x0457; i++)
+                       nv_icmd(dev, i, 0x0000c080);
+       }
        nv_icmd(dev, 0x000000ad, 0x0000013e);
        nv_icmd(dev, 0x000000e1, 0x00000010);
        nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x0000053f, 0xffff0000);
        nv_icmd(dev, 0x00000585, 0x0000003f);
        nv_icmd(dev, 0x00000576, 0x00000003);
-       if (dev_priv->chipset == 0xc1)
+       if (dev_priv->chipset == 0xc1 ||
+           dev_priv->chipset == 0xd9)
                nv_icmd(dev, 0x0000057b, 0x00000059);
        nv_icmd(dev, 0x00000586, 0x00000040);
        nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x00000957, 0x00000003);
        nv_icmd(dev, 0x0000095e, 0x20164010);
        nv_icmd(dev, 0x0000095f, 0x00000020);
+       if (dev_priv->chipset == 0xd9)
+               nv_icmd(dev, 0x0000097d, 0x00000020);
        nv_icmd(dev, 0x00000683, 0x00000006);
        nv_icmd(dev, 0x00000685, 0x003fffff);
        nv_icmd(dev, 0x00000687, 0x00000c48);
index 06f5e26d1e0f5a2915134e172f889ff84b393561..15272be33b66431e4a2ee54ed2768bfc981b6ae9 100644 (file)
@@ -32,7 +32,7 @@
  * - watchdog timer around ctx operations
  */
 
-.section nvc0_grgpc_data
+.section #nvc0_grgpc_data
 include(`nvc0_graph.fuc')
 gpc_id:                        .b32 0
 gpc_mmio_list_head:    .b32 0
@@ -48,40 +48,45 @@ cmd_queue:          queue_init
 // chipset descriptions
 chipsets:
 .b8  0xc0 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
 .b8  0xc1 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc1_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc1_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
 .b8  0xc3 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xc4 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xc8 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
 .b8  0xce 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xcf 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvcf_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
 .b8  0 0 0 0
 
 // GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
 mmctx_data(0x000c6c, 1);
 nvc1_gpc_mmio_tail:
 
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
 // TPC mmio lists
 nvc0_tpc_mmio_head:
 mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
 mmctx_data(0x000544, 1)
 nvc1_tpc_mmio_tail:
 
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
 
-.section nvc0_grgpc_code
-bra init
+.section #nvc0_grgpc_code
+bra #init
 define(`include_code')
 include(`nvc0_graph.fuc')
 
@@ -160,10 +219,10 @@ error:
        push $r14
        mov $r14 -0x67ec        // 0x9814
        sethi $r14 0x400000
-       call nv_wr32            // HUB_CTXCTL_CC_SCRATCH[5] = error code
+       call #nv_wr32           // HUB_CTXCTL_CC_SCRATCH[5] = error code
        add b32 $r14 0x41c
        mov $r15 1
-       call nv_wr32            // HUB_CTXCTL_INTR_UP_SET
+       call #nv_wr32           // HUB_CTXCTL_INTR_UP_SET
        pop $r14
        ret
 
@@ -190,7 +249,7 @@ init:
        iowr I[$r1 + 0x000] $r2         // FIFO_ENABLE
 
        // setup i0 handler, and route all interrupts to it
-       mov $r1 ih
+       mov $r1 #ih
        mov $iv0 $r1
        mov $r1 0x400
        iowr I[$r1 + 0x300] $r0         // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
        and $r2 0x1f
        shl b32 $r3 $r2
        sub b32 $r3 1
-       st b32 D[$r0 + tpc_count] $r2
-       st b32 D[$r0 + tpc_mask] $r3
+       st b32 D[$r0 + #tpc_count] $r2
+       st b32 D[$r0 + #tpc_mask] $r3
        add b32 $r1 0x400
        iord $r2 I[$r1 + 0x000]         // MYINDEX
-       st b32 D[$r0 + gpc_id] $r2
+       st b32 D[$r0 + #gpc_id] $r2
 
        // find context data for this chipset
        mov $r2 0x800
        shl b32 $r2 6
        iord $r2 I[$r2 + 0x000]         // CC_SCRATCH[0]
-       mov $r1 chipsets - 12
+       mov $r1 #chipsets - 12
        init_find_chipset:
                add b32 $r1 12
                ld b32 $r3 D[$r1 + 0x00]
                cmpu b32 $r3 $r2
-               bra e init_context
+               bra e #init_context
                cmpu b32 $r3 0
-               bra ne init_find_chipset
+               bra ne #init_find_chipset
                // unknown chipset
                ret
 
@@ -253,19 +312,19 @@ init:
        clear b32 $r15
        ld b16 $r14 D[$r1 + 4]
        ld b16 $r15 D[$r1 + 6]
-       st b16 D[$r0 + gpc_mmio_list_head] $r14
-       st b16 D[$r0 + gpc_mmio_list_tail] $r15
-       call mmctx_size
+       st b16 D[$r0 + #gpc_mmio_list_head] $r14
+       st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+       call #mmctx_size
        add b32 $r2 $r15
        add b32 $r3 $r15
 
        // calculate per-TPC mmio context size, store the list pointers
        ld b16 $r14 D[$r1 + 8]
        ld b16 $r15 D[$r1 + 10]
-       st b16 D[$r0 + tpc_mmio_list_head] $r14
-       st b16 D[$r0 + tpc_mmio_list_tail] $r15
-       call mmctx_size
-       ld b32 $r14 D[$r0 + tpc_count]
+       st b16 D[$r0 + #tpc_mmio_list_head] $r14
+       st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+       call #mmctx_size
+       ld b32 $r14 D[$r0 + #tpc_count]
        mulu $r14 $r15
        add b32 $r2 $r14
        add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
 
        // calculate size of strand context data
        mov b32 $r15 $r2
-       call strand_ctx_init
+       call #strand_ctx_init
        add b32 $r3 $r15
 
        // save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
 main:
        bset $flags $p0
        sleep $p0
-       mov $r13 cmd_queue
-       call queue_get
-       bra $p1 main
+       mov $r13 #cmd_queue
+       call #queue_get
+       bra $p1 #main
 
        // 0x0000-0x0003 are all context transfers
        cmpu b32 $r14 0x04
-       bra nc main_not_ctx_xfer
+       bra nc #main_not_ctx_xfer
                // fetch $flags and mask off $p1/$p2
                mov $r1 $flags
                mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
                or $r1 $r14
                mov $flags $r1
                // transfer context data
-               call ctx_xfer
-               bra main
+               call #ctx_xfer
+               bra #main
 
        main_not_ctx_xfer:
        shl b32 $r15 $r14 16
        or $r15 E_BAD_COMMAND
-       call error
-       bra main
+       call #error
+       bra #main
 
 // interrupt handler
 ih:
@@ -342,13 +401,13 @@ ih:
        // incoming fifo command?
        iord $r10 I[$r0 + 0x200]        // INTR
        and $r11 $r10 0x00000004
-       bra e ih_no_fifo
+       bra e #ih_no_fifo
                // queue incoming fifo command for later processing
                mov $r11 0x1900
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                iord $r14 I[$r11 + 0x100]       // FIFO_CMD
                iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call queue_put
+               call #queue_put
                add b32 $r11 0x400
                mov $r14 1
                iowr I[$r11 + 0x000] $r14       // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
 //
 hub_barrier_done:
        mov $r15 1
-       ld b32 $r14 D[$r0 + gpc_id]
+       ld b32 $r14 D[$r0 + #gpc_id]
        shl b32 $r15 $r14
        mov $r14 -0x6be8        // 0x409418 - HUB_BAR_SET
        sethi $r14 0x400000
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
        mov $r15 8
        ctx_redswitch_delay:
                sub b32 $r15 1
-               bra ne ctx_redswitch_delay
+               bra ne #ctx_redswitch_delay
        mov $r15 0xa20
        iowr I[$r14] $r15       // GPC_RED_SWITCH = UNK11, ENABLE, POWER
        ret
@@ -413,8 +472,8 @@ ctx_xfer:
        mov $r1 0xa04
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r15// MEM_BASE
-       bra not $p1 ctx_xfer_not_load
-               call ctx_redswitch
+       bra not $p1 #ctx_xfer_not_load
+               call #ctx_redswitch
        ctx_xfer_not_load:
 
        // strands
@@ -422,7 +481,7 @@ ctx_xfer:
        sethi $r1 0x20000
        mov $r2 0xc
        iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call strand_wait
+       call #strand_wait
        mov $r2 0x47fc
        sethi $r2 0x20000
        iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
        or $r10 2               // first
        mov $r11 0x0000
        sethi $r11 0x500000
-       ld b32 $r12 D[$r0 + gpc_id]
+       ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn
-       ld b32 $r12 D[$r0 + gpc_mmio_list_head]
-       ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+       ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+       ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
        mov $r14 0              // not multi
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // per-TPC mmio context
        xbit $r10 $flags $p1    // direction
        or $r10 4               // last
        mov $r11 0x4000
        sethi $r11 0x500000     // base = NV_PGRAPH_GPC0_TPC0
-       ld b32 $r12 D[$r0 + gpc_id]
+       ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn_TPC0
-       ld b32 $r12 D[$r0 + tpc_mmio_list_head]
-       ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
-       ld b32 $r15 D[$r0 + tpc_mask]
+       ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+       ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+       ld b32 $r15 D[$r0 + #tpc_mask]
        mov $r14 0x800          // stride = 0x800
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // wait for strands to finish
-       call strand_wait
+       call #strand_wait
 
        // if load, or a save without a load following, do some
        // unknown stuff that's done after finishing a block of
        // strand commands
-       bra $p1 ctx_xfer_post
-       bra not $p2 ctx_xfer_done
+       bra $p1 #ctx_xfer_post
+       bra not $p2 #ctx_xfer_done
        ctx_xfer_post:
                mov $r1 0x4afc
                sethi $r1 0x20000
                mov $r2 0xd
                iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0d
-               call strand_wait
+               call #strand_wait
 
        // mark completion in HUB's barrier
        ctx_xfer_done:
-       call hub_barrier_done
+       call #hub_barrier_done
        ret
 
 .align 256
index 6f820324480e4ee02e9c4b9dc33cd9c1d6a7694b..a988b8ad00acf3b1b138138823e5f59559770f11 100644 (file)
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
        0x00000000,
        0x00000000,
        0x000000c0,
-       0x011c00bc,
-       0x01700120,
+       0x012800c8,
+       0x01e40194,
        0x000000c1,
-       0x012000bc,
-       0x01840120,
+       0x012c00c8,
+       0x01f80194,
        0x000000c3,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000c4,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000c8,
-       0x011c00bc,
-       0x01700120,
+       0x012800c8,
+       0x01e40194,
        0x000000ce,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000cf,
-       0x011c00bc,
-       0x017c0120,
+       0x012800c8,
+       0x01f00194,
+       0x000000d9,
+       0x0194012c,
+       0x025401f8,
        0x00000000,
        0x00000380,
        0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
        0x08001000,
        0x00001014,
        0x00000c6c,
+       0x00000380,
+       0x04000400,
+       0x0800040c,
+       0x20000450,
+       0x00000600,
+       0x00000684,
+       0x10000700,
+       0x00000800,
+       0x08000808,
+       0x00000828,
+       0x00000830,
+       0x000008d8,
+       0x000008e0,
+       0x140008e8,
+       0x0000091c,
+       0x08000924,
+       0x00000b00,
+       0x14000b08,
+       0x00000bb8,
+       0x00000c08,
+       0x1c000c10,
+       0x00000c6c,
+       0x00000c80,
+       0x00000c8c,
+       0x08001000,
+       0x00001014,
        0x00000018,
        0x0000003c,
        0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
        0x000006e0,
        0x000004bc,
        0x00000544,
+       0x00000018,
+       0x0000003c,
+       0x00000048,
+       0x00000064,
+       0x00000088,
+       0x14000200,
+       0x0400021c,
+       0x000002c4,
+       0x14000300,
+       0x000003d0,
+       0x040003e0,
+       0x08000400,
+       0x08000420,
+       0x000004b0,
+       0x000004e8,
+       0x000004f4,
+       0x04000520,
+       0x00000544,
+       0x0c000604,
+       0x4c000644,
+       0x00000698,
+       0x000006e0,
+       0x08000750,
 };
 
 uint32_t nvc0_grgpc_code[] = {
index e4f8c7e89ddd9dbfbcf57108a49c1ce2e988b324..98acddb2c5bb9103da4d5294447694f51f90eb01 100644 (file)
@@ -27,7 +27,7 @@
  *    m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
  */
 
-.section nvc0_grhub_data
+.section #nvc0_grhub_data
 include(`nvc0_graph.fuc')
 gpc_count:             .b32 0
 rop_count:             .b32 0
@@ -39,26 +39,29 @@ ctx_current:                .b32 0
 
 chipsets:
 .b8  0xc0 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc1 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc1_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
 .b8  0xc3 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc4 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc8 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xce 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xcf 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
 .b8  0 0 0 0
 
 nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
 mmctx_data(0x4064c0, 2)
 nvc1_hub_mmio_tail:
 
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
 .align 256
 chan_data:
 chan_mmio_count:       .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address:  .b32 0
 .align 256
 xfer_data:             .b32 0
 
-.section nvc0_grhub_code
-bra init
+.section #nvc0_grhub_code
+bra #init
 define(`include_code')
 include(`nvc0_graph.fuc')
 
@@ -157,7 +202,7 @@ init:
        iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
 
        // setup i0 handler, and route all interrupts to it
-       mov $r1 ih
+       mov $r1 #ih
        mov $iv0 $r1
        mov $r1 0x400
        iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
        // fetch enabled GPC/ROP counts
        mov $r14 -0x69fc        // 0x409604
        sethi $r14 0x400000
-       call nv_rd32
+       call #nv_rd32
        extr $r1 $r15 16:20
-       st b32 D[$r0 + rop_count] $r1
+       st b32 D[$r0 + #rop_count] $r1
        and $r15 0x1f
-       st b32 D[$r0 + gpc_count] $r15
+       st b32 D[$r0 + #gpc_count] $r15
 
        // set BAR_REQMASK to GPC mask
        mov $r1 1
@@ -220,14 +265,14 @@ init:
        mov $r2 0x800
        shl b32 $r2 6
        iord $r2 I[$r2 + 0x000]         // CC_SCRATCH[0]
-       mov $r15 chipsets - 8
+       mov $r15 #chipsets - 8
        init_find_chipset:
                add b32 $r15 8
                ld b32 $r3 D[$r15 + 0x00]
                cmpu b32 $r3 $r2
-               bra e init_context
+               bra e #init_context
                cmpu b32 $r3 0
-               bra ne init_find_chipset
+               bra ne #init_find_chipset
                // unknown chipset
                ret
 
@@ -239,9 +284,9 @@ init:
        ld b16 $r14 D[$r15 + 4]
        ld b16 $r15 D[$r15 + 6]
        sethi $r14 0
-       st b32 D[$r0 + hub_mmio_list_head] $r14
-       st b32 D[$r0 + hub_mmio_list_tail] $r15
-       call mmctx_size
+       st b32 D[$r0 + #hub_mmio_list_head] $r14
+       st b32 D[$r0 + #hub_mmio_list_tail] $r15
+       call #mmctx_size
 
        // set mmctx base addresses now so we don't have to do it later,
        // they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
        add b32 $r1 1
        shl b32 $r1 8
        mov b32 $r15 $r1
-       call strand_ctx_init
+       call #strand_ctx_init
        add b32 $r1 $r15
 
        // initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
        // when it has completed, and return the size of its context data
        // in GPCn_CC_SCRATCH[1]
        //
-       ld b32 $r3 D[$r0 + gpc_count]
+       ld b32 $r3 D[$r0 + #gpc_count]
        mov $r4 0x2000
        sethi $r4 0x500000
        init_gpc:
                // setup, and start GPC ucode running
                add b32 $r14 $r4 0x804
                mov b32 $r15 $r1
-               call nv_wr32                    // CC_SCRATCH[1] = ctx offset
+               call #nv_wr32                   // CC_SCRATCH[1] = ctx offset
                add b32 $r14 $r4 0x800
                mov b32 $r15 $r2
-               call nv_wr32                    // CC_SCRATCH[0] = chipset
+               call #nv_wr32                   // CC_SCRATCH[0] = chipset
                add b32 $r14 $r4 0x10c
                clear b32 $r15
-               call nv_wr32
+               call #nv_wr32
                add b32 $r14 $r4 0x104
-               call nv_wr32                    // ENTRY
+               call #nv_wr32                   // ENTRY
                add b32 $r14 $r4 0x100
                mov $r15 2                      // CTRL_START_TRIGGER
-               call nv_wr32                    // CTRL
+               call #nv_wr32                   // CTRL
 
                // wait for it to complete, and adjust context size
                add b32 $r14 $r4 0x800
                init_gpc_wait:
-                       call nv_rd32
+                       call #nv_rd32
                        xbit $r15 $r15 31
-                       bra e init_gpc_wait
+                       bra e #init_gpc_wait
                add b32 $r14 $r4 0x804
-               call nv_rd32
+               call #nv_rd32
                add b32 $r1 $r15
 
                // next!
                add b32 $r4 0x8000
                sub b32 $r3 1
-               bra ne init_gpc
+               bra ne #init_gpc
 
        // save context size, and tell host we're ready
        mov $r2 0x800
@@ -322,13 +367,13 @@ main:
        // sleep until we have something to do
        bset $flags $p0
        sleep $p0
-       mov $r13 cmd_queue
-       call queue_get
-       bra $p1 main
+       mov $r13 #cmd_queue
+       call #queue_get
+       bra $p1 #main
 
        // context switch, requested by GPU?
        cmpu b32 $r14 0x4001
-       bra ne main_not_ctx_switch
+       bra ne #main_not_ctx_switch
                trace_set(T_AUTO)
                mov $r1 0xb00
                shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
                iord $r1 I[$r1 + 0x000]         // CHAN_CUR
 
                xbit $r3 $r1 31
-               bra e chsw_no_prev
+               bra e #chsw_no_prev
                        xbit $r3 $r2 31
-                       bra e chsw_prev_no_next
+                       bra e #chsw_prev_no_next
                                push $r2
                                mov b32 $r2 $r1
                                trace_set(T_SAVE)
                                bclr $flags $p1
                                bset $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
                                trace_clr(T_SAVE);
                                pop $r2
                                trace_set(T_LOAD);
                                bset $flags $p1
-                               call ctx_xfer
+                               call #ctx_xfer
                                trace_clr(T_LOAD);
-                               bra chsw_done
+                               bra #chsw_done
                        chsw_prev_no_next:
                                push $r2
                                mov b32 $r2 $r1
                                bclr $flags $p1
                                bclr $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
                                pop $r2
                                mov $r1 0xb00
                                shl b32 $r1 6
                                iowr I[$r1] $r2
-                               bra chsw_done
+                               bra #chsw_done
                chsw_no_prev:
                        xbit $r3 $r2 31
-                       bra e chsw_done
+                       bra e #chsw_done
                                bset $flags $p1
                                bclr $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
 
                // ack the context switch request
                chsw_done:
@@ -377,32 +422,32 @@ main:
                mov $r2 1
                iowr I[$r1 + 0x000] $r2         // 0x409b0c
                trace_clr(T_AUTO)
-               bra main
+               bra #main
 
        // request to set current channel? (*not* a context switch)
        main_not_ctx_switch:
        cmpu b32 $r14 0x0001
-       bra ne main_not_ctx_chan
+       bra ne #main_not_ctx_chan
                mov b32 $r2 $r15
-               call ctx_chan
-               bra main_done
+               call #ctx_chan
+               bra #main_done
 
        // request to store current channel context?
        main_not_ctx_chan:
        cmpu b32 $r14 0x0002
-       bra ne main_not_ctx_save
+       bra ne #main_not_ctx_save
                trace_set(T_SAVE)
                bclr $flags $p1
                bclr $flags $p2
-               call ctx_xfer
+               call #ctx_xfer
                trace_clr(T_SAVE)
-               bra main_done
+               bra #main_done
 
        main_not_ctx_save:
                shl b32 $r15 $r14 16
                or $r15 E_BAD_COMMAND
-               call error
-               bra main
+               call #error
+               bra #main
 
        main_done:
        mov $r1 0x820
@@ -410,7 +455,7 @@ main:
        clear b32 $r2
        bset $r2 31
        iowr I[$r1 + 0x000] $r2         // CC_SCRATCH[0] |= 0x80000000
-       bra main
+       bra #main
 
 // interrupt handler
 ih:
@@ -427,13 +472,13 @@ ih:
        // incoming fifo command?
        iord $r10 I[$r0 + 0x200]        // INTR
        and $r11 $r10 0x00000004
-       bra e ih_no_fifo
+       bra e #ih_no_fifo
                // queue incoming fifo command for later processing
                mov $r11 0x1900
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                iord $r14 I[$r11 + 0x100]       // FIFO_CMD
                iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call queue_put
+               call #queue_put
                add b32 $r11 0x400
                mov $r14 1
                iowr I[$r11 + 0x000] $r14       // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
        // context switch request?
        ih_no_fifo:
        and $r11 $r10 0x00000100
-       bra e ih_no_ctxsw
+       bra e #ih_no_ctxsw
                // enqueue a context switch for later processing
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                mov $r14 0x4001
-               call queue_put
+               call #queue_put
 
        // anything we didn't handle, bring it to the host's attention
        ih_no_ctxsw:
        mov $r11 0x104
        not b32 $r11
        and $r11 $r10 $r11
-       bra e ih_no_other
+       bra e #ih_no_other
                mov $r10 0xc1c
                shl b32 $r10 6
                iowr I[$r10] $r11       // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
        mov $r14 0x4160
        sethi $r14 0x400000
        mov $r15 1
-       call nv_wr32
+       call #nv_wr32
        ctx_4160s_wait:
-               call nv_rd32
+               call #nv_rd32
                xbit $r15 $r15 4
-               bra e ctx_4160s_wait
+               bra e #ctx_4160s_wait
        ret
 
 // Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
        mov $r14 0x4160
        sethi $r14 0x400000
        clear b32 $r15
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Again, not real sure
@@ -503,7 +548,7 @@ ctx_4170s:
        mov $r14 0x4170
        sethi $r14 0x400000
        or $r15 0x10
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
 ctx_4170w:
        mov $r14 0x4170
        sethi $r14 0x400000
-       call nv_rd32
+       call #nv_rd32
        and $r15 0x10
-       bra ne ctx_4170w
+       bra ne #ctx_4170w
        ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
        mov $r15 8
        ctx_redswitch_delay:
                sub b32 $r15 1
-               bra ne ctx_redswitch_delay
+               bra ne #ctx_redswitch_delay
        mov $r15 0x770
        iowr I[$r14] $r15       // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
        ret
@@ -546,10 +591,10 @@ ctx_86c:
        iowr I[$r14] $r15       // HUB(0x86c) = val
        mov $r14 -0x75ec
        sethi $r14 0x400000
-       call nv_wr32            // ROP(0xa14) = val
+       call #nv_wr32           // ROP(0xa14) = val
        mov $r14 -0x5794
        sethi $r14 0x410000
-       call nv_wr32            // GPC(0x86c) = val
+       call #nv_wr32           // GPC(0x86c) = val
        ret
 
 // ctx_load - load's a channel's ctxctl data, and selects its vm
@@ -561,7 +606,7 @@ ctx_load:
 
        // switch to channel, somewhat magic in parts..
        mov $r10 12             // DONE_UNK12
-       call wait_donez
+       call #wait_donez
        mov $r1 0xa24
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
        ctx_chan_wait_0:
                iord $r4 I[$r1 + 0x100]
                and $r4 0x1f
-               bra ne ctx_chan_wait_0
+               bra ne #ctx_chan_wait_0
        iowr I[$r3 + 0x000] $r2 // CHAN_CUR
 
        // load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
        sethi $r2 0x80000000
        iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vram
        mov $r1 0x10                    // chan + 0x0210
-       mov $r2 xfer_data
+       mov $r2 #xfer_data
        sethi $r2 0x00020000            // 16 bytes
        xdld $r1 $r2
        xdwait
        trace_clr(T_LCHAN)
 
        // update current context
-       ld b32 $r1 D[$r0 + xfer_data + 4]
+       ld b32 $r1 D[$r0 + #xfer_data + 4]
        shl b32 $r1 24
-       ld b32 $r2 D[$r0 + xfer_data + 0]
+       ld b32 $r2 D[$r0 + #xfer_data + 0]
        shr b32 $r2 8
        or $r1 $r2
-       st b32 D[$r0 + ctx_current] $r1
+       st b32 D[$r0 + #ctx_current] $r1
 
        // set transfer base to start of context, and fetch context header
        trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
        mov $r1 0xa20
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vm
-       mov $r1 chan_data
+       mov $r1 #chan_data
        sethi $r1 0x00060000            // 256 bytes
        xdld $r0 $r1
        xdwait
@@ -635,10 +680,10 @@ ctx_load:
 // In: $r2 channel address
 //
 ctx_chan:
-       call ctx_4160s
-       call ctx_load
+       call #ctx_4160s
+       call #ctx_load
        mov $r10 12                     // DONE_UNK12
-       call wait_donez
+       call #wait_donez
        mov $r1 0xa10
        shl b32 $r1 6
        mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
        ctx_chan_wait:
                iord $r2 I[$r1 + 0x000]
                or $r2 $r2
-               bra ne ctx_chan_wait
-       call ctx_4160c
+               bra ne #ctx_chan_wait
+       call #ctx_4160c
        ret
 
 // Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
 //
 ctx_mmio_exec:
        // set transfer base to be the mmio list
-       ld b32 $r3 D[$r0 + chan_mmio_address]
+       ld b32 $r3 D[$r0 + #chan_mmio_address]
        mov $r2 0xa04
        shl b32 $r2 6
        iowr I[$r2 + 0x000] $r3         // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
        ctx_mmio_loop:
                // fetch next 256 bytes of mmio list if necessary
                and $r4 $r3 0xff
-               bra ne ctx_mmio_pull
-                       mov $r5 xfer_data
+               bra ne #ctx_mmio_pull
+                       mov $r5 #xfer_data
                        sethi $r5 0x00060000    // 256 bytes
                        xdld $r3 $r5
                        xdwait
 
                // execute a single list entry
                ctx_mmio_pull:
-               ld b32 $r14 D[$r4 + xfer_data + 0x00]
-               ld b32 $r15 D[$r4 + xfer_data + 0x04]
-               call nv_wr32
+               ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+               ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+               call #nv_wr32
 
                // next!
                add b32 $r3 8
                sub b32 $r1 1
-               bra ne ctx_mmio_loop
+               bra ne #ctx_mmio_loop
 
        // set transfer base back to the current context
        ctx_mmio_done:
-       ld b32 $r3 D[$r0 + ctx_current]
+       ld b32 $r3 D[$r0 + #ctx_current]
        iowr I[$r2 + 0x000] $r3         // MEM_BASE
 
        // disable the mmio list now, we don't need/want to execute it again
-       st b32 D[$r0 + chan_mmio_count] $r0
-       mov $r1 chan_data
+       st b32 D[$r0 + #chan_mmio_count] $r0
+       mov $r1 #chan_data
        sethi $r1 0x00060000            // 256 bytes
        xdst $r0 $r1
        xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
 //             on load it means: "a save preceeded this load"
 //
 ctx_xfer:
-       bra not $p1 ctx_xfer_pre
-       bra $p2 ctx_xfer_pre_load
+       bra not $p1 #ctx_xfer_pre
+       bra $p2 #ctx_xfer_pre_load
        ctx_xfer_pre:
                mov $r15 0x10
-               call ctx_86c
-               call ctx_4160s
-               bra not $p1 ctx_xfer_exec
+               call #ctx_86c
+               call #ctx_4160s
+               bra not $p1 #ctx_xfer_exec
 
        ctx_xfer_pre_load:
                mov $r15 2
-               call ctx_4170s
-               call ctx_4170w
-               call ctx_redswitch
+               call #ctx_4170s
+               call #ctx_4170w
+               call #ctx_redswitch
                clear b32 $r15
-               call ctx_4170s
-               call ctx_load
+               call #ctx_4170s
+               call #ctx_load
 
        // fetch context pointer, and initiate xfer on all GPCs
        ctx_xfer_exec:
-       ld b32 $r1 D[$r0 + ctx_current]
+       ld b32 $r1 D[$r0 + #ctx_current]
        mov $r2 0x414
        shl b32 $r2 6
        iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
        mov $r14 -0x5b00
        sethi $r14 0x410000
        mov b32 $r15 $r1
-       call nv_wr32            // GPC_BCAST_WRCMD_DATA = ctx pointer
+       call #nv_wr32           // GPC_BCAST_WRCMD_DATA = ctx pointer
        add b32 $r14 4
        xbit $r15 $flags $p1
        xbit $r2 $flags $p2
        shl b32 $r2 1
        or $r15 $r2
-       call nv_wr32            // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+       call #nv_wr32           // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
 
        // strands
        mov $r1 0x4afc
        sethi $r1 0x20000
        mov $r2 0xc
        iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call strand_wait
+       call #strand_wait
        mov $r2 0x47fc
        sethi $r2 0x20000
        iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
        xbit $r10 $flags $p1    // direction
        or $r10 6               // first, last
        mov $r11 0              // base = 0
-       ld b32 $r12 D[$r0 + hub_mmio_list_head]
-       ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+       ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+       ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
        mov $r14 0              // not multi
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // wait for GPCs to all complete
        mov $r10 8              // DONE_BAR
-       call wait_doneo
+       call #wait_doneo
 
        // wait for strand xfer to complete
-       call strand_wait
+       call #strand_wait
 
        // post-op
-       bra $p1 ctx_xfer_post
+       bra $p1 #ctx_xfer_post
                mov $r10 12             // DONE_UNK12
-               call wait_donez
+               call #wait_donez
                mov $r1 0xa10
                shl b32 $r1 6
                mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
                ctx_xfer_post_save_wait:
                        iord $r2 I[$r1]
                        or $r2 $r2
-                       bra ne ctx_xfer_post_save_wait
+                       bra ne #ctx_xfer_post_save_wait
 
-       bra $p2 ctx_xfer_done
+       bra $p2 #ctx_xfer_done
        ctx_xfer_post:
                mov $r15 2
-               call ctx_4170s
+               call #ctx_4170s
                clear b32 $r15
-               call ctx_86c
-               call strand_post
-               call ctx_4170w
+               call #ctx_86c
+               call #strand_post
+               call #ctx_4170w
                clear b32 $r15
-               call ctx_4170s
+               call #ctx_4170s
 
-               bra not $p1 ctx_xfer_no_post_mmio
-               ld b32 $r1 D[$r0 + chan_mmio_count]
+               bra not $p1 #ctx_xfer_no_post_mmio
+               ld b32 $r1 D[$r0 + #chan_mmio_count]
                or $r1 $r1
-               bra e ctx_xfer_no_post_mmio
-                       call ctx_mmio_exec
+               bra e #ctx_xfer_no_post_mmio
+                       call #ctx_mmio_exec
 
                ctx_xfer_no_post_mmio:
-               call ctx_4160c
+               call #ctx_4160c
 
        ctx_xfer_done:
        ret
index 241d3263f1e56af9359df3db2d113505e4f764f2..c5ed307abeb9ca80ec9b366fc0f7da1ae323464c 100644 (file)
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
        0x00000000,
        0x00000000,
        0x000000c0,
-       0x01340098,
+       0x013c00a0,
        0x000000c1,
-       0x01380098,
+       0x014000a0,
        0x000000c3,
-       0x01340098,
+       0x013c00a0,
        0x000000c4,
-       0x01340098,
+       0x013c00a0,
        0x000000c8,
-       0x01340098,
+       0x013c00a0,
        0x000000ce,
-       0x01340098,
+       0x013c00a0,
        0x000000cf,
-       0x01340098,
+       0x013c00a0,
+       0x000000d9,
+       0x01dc0140,
        0x00000000,
        0x0417e91c,
        0x04400204,
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
        0x0c408900,
        0x00408980,
        0x044064c0,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x0417e91c,
+       0x04400204,
+       0x24404004,
+       0x00404044,
+       0x34404094,
+       0x184040d0,
+       0x004040f8,
+       0x08404130,
+       0x08404150,
+       0x04404164,
+       0x04404178,
+       0x1c404200,
+       0x34404404,
+       0x0c404460,
+       0x00404480,
+       0x00404498,
+       0x0c404604,
+       0x7c404618,
+       0x50404698,
+       0x044046f0,
+       0x54404700,
+       0x00405800,
+       0x08405830,
+       0x00405854,
+       0x0c405870,
+       0x04405a00,
+       0x00405a18,
+       0x00406020,
+       0x0c406028,
+       0x044064a8,
+       0x104064b4,
+       0x00407804,
+       0x1440780c,
+       0x004078bc,
+       0x18408000,
+       0x00408064,
+       0x08408800,
+       0x0c408900,
+       0x00408980,
        0x00000000,
        0x00000000,
        0x00000000,
index 929aded35cb572e9751e3ba0190a329cb2768613..e9992f62c1c0f97da00b8c04c5fdaca6d5c0f13b 100644 (file)
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
        perflvl->vdec   = read_clk(dev, 0x0e);
        return 0;
 }
+
+struct nvc0_pm_clock {
+       u32 freq;
+       u32 ssel;
+       u32 mdiv;
+       u32 dsrc;
+       u32 ddiv;
+       u32 coef;
+};
+
+struct nvc0_pm_state {
+       struct nvc0_pm_clock eng[16];
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+       u32 div = min((ref * 2) / freq, (u32)65);
+       if (div < 2)
+               div = 2;
+
+       *ddiv = div - 2;
+       return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+       u32 sclk;
+
+       /* use one of the fixed frequencies if possible */
+       *ddiv = 0x00000000;
+       switch (freq) {
+       case  27000:
+       case 108000:
+               *dsrc = 0x00000000;
+               if (freq == 108000)
+                       *dsrc |= 0x00030000;
+               return freq;
+       case 100000:
+               *dsrc = 0x00000002;
+               return freq;
+       default:
+               *dsrc = 0x00000003;
+               break;
+       }
+
+       /* otherwise, calculate the closest divider */
+       sclk = read_vco(dev, clk);
+       if (clk < 7)
+               sclk = calc_div(dev, clk, sclk, freq, ddiv);
+       return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+       struct pll_lims limits;
+       int N, M, P, ret;
+
+       ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+       if (ret)
+               return 0;
+
+       limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+       if (!limits.refclk)
+               return 0;
+
+       ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+       if (ret <= 0)
+               return 0;
+
+       *coef = (P << 16) | (N << 8) | M;
+       return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks.  For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists.  You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+       u32 src0, div0, div1D, div1P = 0;
+       u32 clk0, clk1 = 0;
+
+       /* invalid clock domain */
+       if (!freq)
+               return 0;
+
+       /* first possible path, using only dividers */
+       clk0 = calc_src(dev, clk, freq, &src0, &div0);
+       clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+       /* see if we can get any closer using PLLs */
+       if (clk0 != freq) {
+               if (clk < 7)
+                       clk1 = calc_pll(dev, clk, freq, &info->coef);
+               else
+                       clk1 = read_pll(dev, 0x1370e0);
+               clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+       }
+
+       /* select the method which gets closest to target freq */
+       if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+               info->dsrc = src0;
+               if (div0) {
+                       info->ddiv |= 0x80000000;
+                       info->ddiv |= div0 << 8;
+                       info->ddiv |= div0;
+               }
+               if (div1D) {
+                       info->mdiv |= 0x80000000;
+                       info->mdiv |= div1D;
+               }
+               info->ssel = 0;
+               info->freq = clk0;
+       } else {
+               if (div1P) {
+                       info->mdiv |= 0x80000000;
+                       info->mdiv |= div1P << 8;
+               }
+               info->ssel = (1 << clk);
+               info->freq = clk1;
+       }
+
+       return 0;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvc0_pm_state *info;
+       int ret;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       /* NFI why this is still in the performance table, the ROPCs appear
+        * to get their clock from clock 2 ("hub07", actually hub05 on this
+        * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
+        * are always the same freq with the binary driver even when the
+        * performance table says they should differ.
+        */
+       if (dev_priv->chipset == 0xd9)
+               perflvl->rop = 0;
+
+       if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+           (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+           (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+           (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+           (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+           (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+           (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+           (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+               kfree(info);
+               return ERR_PTR(ret);
+       }
+
+       return info;
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+       /* program dividers at 137160/1371d0 first */
+       if (clk < 7 && !info->ssel) {
+               nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+               nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+       }
+
+       /* switch clock to non-pll mode */
+       nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
+       nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+
+       /* reprogram pll */
+       if (clk < 7) {
+               /* make sure it's disabled first... */
+               u32 base = 0x137000 + (clk * 0x20);
+               u32 ctrl = nv_rd32(dev, base + 0x00);
+               if (ctrl & 0x00000001) {
+                       nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
+                       nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+               }
+               /* program it to new values, if necessary */
+               if (info->ssel) {
+                       nv_wr32(dev, base + 0x04, info->coef);
+                       nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
+                       nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
+                       nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+               }
+       }
+
+       /* select pll/non-pll mode, and program final clock divider */
+       nv_mask(dev, 0x137100, (1 << clk), info->ssel);
+       nv_wait(dev, 0x137100, (1 << clk), info->ssel);
+       nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+       struct nvc0_pm_state *info = data;
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               if (!info->eng[i].freq)
+                       continue;
+               prog_clk(dev, i, &info->eng[i]);
+       }
+
+       kfree(info);
+       return 0;
+}
index cb006a718e700f2c72cf06dfdac0c7f22e8cbdac..d2ba2f07400bff423673ae9f28a729d7bfb52b4a 100644 (file)
 #include "nouveau_fb.h"
 #include "nv50_display.h"
 
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+
+struct evo {
+       int idx;
+       dma_addr_t handle;
+       u32 *ptr;
+       struct {
+               u32 offset;
+               u16 value;
+       } sem;
+};
+
 struct nvd0_display {
        struct nouveau_gpuobj *mem;
-       struct {
-               dma_addr_t handle;
-               u32 *ptr;
-       } evo[1];
+       struct nouveau_bo *sync;
+       struct evo evo[9];
 
        struct tasklet_struct tasklet;
        u32 modeset;
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
        return dev_priv->engine.display.priv;
 }
 
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+       return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
 static inline int
 evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
 {
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
                put = 0;
        }
 
+       if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+               NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
+
        return disp->evo[id].ptr + put;
 }
 
@@ -91,40 +125,264 @@ static void
 evo_kick(u32 *push, struct drm_device *dev, int id)
 {
        struct nvd0_display *disp = nvd0_display(dev);
+
+       if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
+               u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
+               u32 *cur = disp->evo[id].ptr + curp;
+
+               while (cur < push)
+                       NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
+               NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
+       }
+
        nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
 }
 
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
 #define evo_data(p,d)   *((p)++) = (d)
 
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
+static int
+evo_init_dma(struct drm_device *dev, int ch)
 {
-       return nouveau_encoder(encoder)->crtc;
+       struct nvd0_display *disp = nvd0_display(dev);
+       u32 flags;
+
+       flags = 0x00000000;
+       if (ch == EVO_MASTER)
+               flags |= 0x01000000;
+
+       nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+       nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
+       nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
+       nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+       if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+               NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+                             nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+               return -EBUSY;
+       }
+
+       nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+       nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+       return 0;
+}
+
+static void
+evo_fini_dma(struct drm_device *dev, int ch)
+{
+       if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+               return;
+
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+       nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+       nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+       nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static inline void
+evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
+{
+       nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+}
+
+static int
+evo_init_pio(struct drm_device *dev, int ch)
+{
+       nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
+       if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+               NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+                             nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+               return -EBUSY;
+       }
+
+       nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+       nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+       return 0;
+}
+
+static void
+evo_fini_pio(struct drm_device *dev, int ch)
+{
+       if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+               return;
+
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+       nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+       nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+       nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static bool
+evo_sync_wait(void *data)
+{
+       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev, int ch)
+{
+       struct nvd0_display *disp = nvd0_display(dev);
+       u32 *push = evo_wait(dev, ch, 8);
+       if (push) {
+               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+               evo_mthd(push, 0x0080, 2);
+               evo_data(push, 0x00000000);
+               evo_data(push, 0x00000000);
+               evo_kick(push, dev, ch);
+               if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+                       return 0;
+       }
+
+       return -EBUSY;
+}
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+       return nvd0_display(dev)->sync;
+}
+
+void
+nvd0_display_flip_stop(struct drm_crtc *crtc)
+{
+       struct nvd0_display *disp = nvd0_display(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+       u32 *push;
+
+       push = evo_wait(crtc->dev, evo->idx, 8);
+       if (push) {
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0094, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x00c0, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0080, 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, crtc->dev, evo->idx);
+       }
+}
+
+int
+nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                      struct nouveau_channel *chan, u32 swap_interval)
+{
+       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+       struct nvd0_display *disp = nvd0_display(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+       u64 offset;
+       u32 *push;
+       int ret;
+
+       swap_interval <<= 4;
+       if (swap_interval == 0)
+               swap_interval |= 0x100;
+
+       push = evo_wait(crtc->dev, evo->idx, 128);
+       if (unlikely(push == NULL))
+               return -EBUSY;
+
+       /* synchronise with the rendering channel, if necessary */
+       if (likely(chan)) {
+               ret = RING_SPACE(chan, 10);
+               if (ret)
+                       return ret;
+
+               offset  = chan->dispc_vma[nv_crtc->index].offset;
+               offset += evo->sem.offset;
+
+               BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+               OUT_RING  (chan, upper_32_bits(offset));
+               OUT_RING  (chan, lower_32_bits(offset));
+               OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
+               OUT_RING  (chan, 0x1002);
+               BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+               OUT_RING  (chan, upper_32_bits(offset));
+               OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
+               OUT_RING  (chan, 0x74b1e000);
+               OUT_RING  (chan, 0x1001);
+               FIRE_RING (chan);
+       } else {
+               nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
+                               0xf00d0000 | evo->sem.value);
+               evo_sync(crtc->dev, EVO_MASTER);
+       }
+
+       /* queue the flip */
+       evo_mthd(push, 0x0100, 1);
+       evo_data(push, 0xfffe0000);
+       evo_mthd(push, 0x0084, 1);
+       evo_data(push, swap_interval);
+       if (!(swap_interval & 0x00000100)) {
+               evo_mthd(push, 0x00e0, 1);
+               evo_data(push, 0x40000000);
+       }
+       evo_mthd(push, 0x0088, 4);
+       evo_data(push, evo->sem.offset);
+       evo_data(push, 0xf00d0000 | evo->sem.value);
+       evo_data(push, 0x74b1e000);
+       evo_data(push, NvEvoSync);
+       evo_mthd(push, 0x00a0, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x00c0, 1);
+       evo_data(push, nv_fb->r_dma);
+       evo_mthd(push, 0x0110, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x0400, 5);
+       evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+       evo_data(push, 0);
+       evo_data(push, (fb->height << 16) | fb->width);
+       evo_data(push, nv_fb->r_pitch);
+       evo_data(push, nv_fb->r_format);
+       evo_mthd(push, 0x0080, 1);
+       evo_data(push, 0x00000000);
+       evo_kick(push, crtc->dev, evo->idx);
+
+       evo->sem.offset ^= 0x10;
+       evo->sem.value++;
+       return 0;
 }
 
 /******************************************************************************
  * CRTC
  *****************************************************************************/
 static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
        struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push, mode;
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       u32 *push, mode = 0x00;
 
-       mode = 0x00000000;
-       if (on) {
-               /* 0x11: 6bpc dynamic 2x2
-                * 0x13: 8bpc dynamic 2x2
-                * 0x19: 6bpc static 2x2
-                * 0x1b: 8bpc static 2x2
-                * 0x21: 6bpc temporal
-                * 0x23: 8bpc temporal
-                */
-               mode = 0x00000011;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
+       }
+
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
        }
 
-       push = evo_wait(dev, 0, 4);
+       push = evo_wait(dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
                evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
-               evo_kick(push, dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 
        return 0;
 }
 
 static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct drm_display_mode *mode = &nv_crtc->base.mode;
+       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
        struct drm_device *dev = nv_crtc->base.dev;
+       struct drm_crtc *crtc = &nv_crtc->base;
        struct nouveau_connector *nv_connector;
-       u32 *push, outX, outY;
-
-       outX = mode->hdisplay;
-       outY = mode->vdisplay;
+       int mode = DRM_MODE_SCALE_NONE;
+       u32 oX, oY, *push;
 
+       /* start off at the resolution we programmed the crtc for, this
+        * effectively handles NONE/FULL scaling
+        */
        nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (nv_connector && nv_connector->native_mode) {
-               struct drm_display_mode *native = nv_connector->native_mode;
-               u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
-               u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
-
-               switch (type) {
-               case DRM_MODE_SCALE_ASPECT:
-                       if (xratio > yratio) {
-                               outX = (mode->hdisplay * yratio) >> 19;
-                               outY = (mode->vdisplay * yratio) >> 19;
-                       } else {
-                               outX = (mode->hdisplay * xratio) >> 19;
-                               outY = (mode->vdisplay * xratio) >> 19;
-                       }
-                       break;
-               case DRM_MODE_SCALE_FULLSCREEN:
-                       outX = native->hdisplay;
-                       outY = native->vdisplay;
-                       break;
-               default:
-                       break;
+       if (nv_connector && nv_connector->native_mode)
+               mode = nv_connector->scaling_mode;
+
+       if (mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary, will keep the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               }
+       }
+
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
+       switch (mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
+       case DRM_MODE_SCALE_ASPECT:
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
+               break;
+       default:
+               break;
        }
 
-       push = evo_wait(dev, 0, 16);
+       push = evo_wait(dev, EVO_MASTER, 8);
        if (push) {
                evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, (outY << 16) | outX);
-               evo_data(push, (outY << 16) | outX);
-               evo_data(push, (outY << 16) | outX);
+               evo_data(push, (oY << 16) | oX);
+               evo_data(push, (oY << 16) | oX);
+               evo_data(push, (oY << 16) | oX);
                evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+               evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
+               evo_kick(push, dev, EVO_MASTER);
                if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
+                       nvd0_display_flip_stop(crtc);
+                       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
                }
-               evo_kick(push, dev, 0);
        }
 
        return 0;
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
        struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
        u32 *push;
 
-       push = evo_wait(fb->dev, 0, 16);
+       push = evo_wait(fb->dev, EVO_MASTER, 16);
        if (push) {
                evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
-               evo_kick(push, fb->dev, 0);
+               evo_kick(push, fb->dev, EVO_MASTER);
        }
 
        nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
 nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push = evo_wait(dev, 0, 16);
+       u32 *push = evo_wait(dev, EVO_MASTER, 16);
        if (push) {
                if (show) {
                        evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
                        evo_data(push, 0x00000000);
                }
 
-               evo_kick(push, dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 }
 
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;
 
-       push = evo_wait(crtc->dev, 0, 2);
+       nvd0_display_flip_stop(crtc);
+
+       push = evo_wait(crtc->dev, EVO_MASTER, 2);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
                evo_data(push, 0x03000000);
                evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, 0);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
        nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;
 
-       push = evo_wait(crtc->dev, 0, 32);
+       push = evo_wait(crtc->dev, EVO_MASTER, 32);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
                evo_data(push, NvEvoVRAM);
                evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0xffffff00);
-               evo_kick(push, crtc->dev, 0);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
-       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
 }
 
 static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_connector *nv_connector;
-       u32 htotal = mode->htotal;
-       u32 vtotal = mode->vtotal;
-       u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
-       u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
-       u32 hfrntp = mode->hsync_start - mode->hdisplay;
-       u32 vfrntp = mode->vsync_start - mode->vdisplay;
-       u32 hbackp = mode->htotal - mode->hsync_end;
-       u32 vbackp = mode->vtotal - mode->vsync_end;
-       u32 hss2be = hsyncw + hbackp;
-       u32 vss2be = vsyncw + vbackp;
-       u32 hss2de = htotal - hfrntp;
-       u32 vss2de = vtotal - vfrntp;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
+       u32 magic = 0x31ec6000;
        u32 syncs, *push;
        int ret;
 
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
+               magic  |= 0x00000001;
+       }
+
        syncs = 0x00000001;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                syncs |= 0x00000008;
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
        if (ret)
                return ret;
 
-       push = evo_wait(crtc->dev, 0, 64);
+       push = evo_wait(crtc->dev, EVO_MASTER, 64);
        if (push) {
-               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
                evo_data(push, 0x00000000);
-               evo_data(push, (vtotal << 16) | htotal);
-               evo_data(push, (vsyncw << 16) | hsyncw);
-               evo_data(push, (vss2be << 16) | hss2be);
-               evo_data(push, (vss2de << 16) | hss2de);
+               evo_data(push, (vactive << 16) | hactive);
+               evo_data(push, ( vsynce << 16) | hsynce);
+               evo_data(push, (vblanke << 16) | hblanke);
+               evo_data(push, (vblanks << 16) | hblanks);
+               evo_data(push, (vblan2e << 16) | vblan2s);
                evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000); /* ??? */
                evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
                evo_data(push, mode->clock * 1000);
                evo_data(push, 0x00200000); /* ??? */
                evo_data(push, mode->clock * 1000);
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
                evo_data(push, syncs);
-               evo_kick(push, crtc->dev, 0);
+               evo_data(push, magic);
+               evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+               evo_data(push, 0x00000311);
+               evo_data(push, 0x00000100);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
        nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
-       nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+       nvd0_crtc_set_dither(nv_crtc, false);
+       nvd0_crtc_set_scale(nv_crtc, false);
        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
        return 0;
 }
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        if (ret)
                return ret;
 
+       nvd0_display_flip_stop(crtc);
        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
        return 0;
 }
 
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
                               enum mode_set_atomic state)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       nvd0_display_flip_stop(crtc);
        nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
        return 0;
 }
@@ -472,10 +790,10 @@ static int
 nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       const u32 data = (y << 16) | x;
+       int ch = EVO_CURS(nv_crtc->index);
 
-       nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
-       nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+       evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+       evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
        return 0;
 }
 
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
        .gamma_set = nvd0_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = nvd0_crtc_destroy,
+       .page_flip = nouveau_crtc_page_flip,
 };
 
 static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
 
-       push = evo_wait(encoder->dev, 0, 4);
+       push = evo_wait(encoder->dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
                evo_data(push, 1 << nv_crtc->index);
                evo_data(push, 0x00ff);
-               evo_kick(push, encoder->dev, 0);
+               evo_kick(push, encoder->dev, EVO_MASTER);
        }
 
        nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);
 
-               push = evo_wait(dev, 0, 4);
+               push = evo_wait(dev, EVO_MASTER, 4);
                if (push) {
                        evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
-                       evo_kick(push, dev, 0);
+                       evo_kick(push, dev, EVO_MASTER);
                }
 
                nv_encoder->crtc = NULL;
@@ -759,6 +1078,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
        return 0;
 }
 
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       int i, or = nv_encoder->or * 0x30;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid))
+               return;
+
+       nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+
+       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+       if (nv_connector->base.eld[0]) {
+               u8 *eld = nv_connector->base.eld;
+
+               for (i = 0; i < eld[2] * 4; i++)
+                       nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+               for (i = eld[2] * 4; i < 0x60; i++)
+                       nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+
+               nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+       }
+}
+
+static void
+nvd0_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       int or = nv_encoder->or * 0x30;
+
+       nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       int head = nv_crtc->index * 0x800;
+       u32 rekey = 56; /* binary driver, and tegra constant */
+       u32 max_ac_packet;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_hdmi_monitor(nv_connector->edid))
+               return;
+
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       /* AVI InfoFrame */
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+       nv_wr32(dev, 0x61671c + head, 0x000d0282);
+       nv_wr32(dev, 0x616720 + head, 0x0000006f);
+       nv_wr32(dev, 0x616724 + head, 0x00000000);
+       nv_wr32(dev, 0x616728 + head, 0x00000000);
+       nv_wr32(dev, 0x61672c + head, 0x00000000);
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+
+       /* ??? InfoFrame? */
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+       nv_wr32(dev, 0x6167ac + head, 0x00000010);
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+
+       /* HDMI_CTRL */
+       nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+                                                 max_ac_packet << 16);
+
+       /* NFI, audio doesn't work without it though.. */
+       nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+
+       nvd0_audio_mode_set(encoder, mode);
+}
+
+static void
+nvd0_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       struct drm_device *dev = encoder->dev;
+       int head = nv_crtc->index * 0x800;
+
+       nvd0_audio_disconnect(encoder);
+
+       nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+}
+
 /******************************************************************************
  * SOR
  *****************************************************************************/
@@ -829,7 +1250,8 @@ static void
 nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                  struct drm_display_mode *mode)
 {
-       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+       struct drm_device *dev = encoder->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                or_config = (mode_ctrl & 0x00000f00) >> 8;
                if (mode->clock >= 165000)
                        or_config |= 0x0100;
+
+               nvd0_hdmi_mode_set(encoder, mode);
                break;
        case OUTPUT_LVDS:
                or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                        if (bios->fp.if_is_24bit)
                                or_config |= 0x0200;
                } else {
-                       if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        or_config |= 0x0100;
                        } else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
        nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
 
-       push = evo_wait(encoder->dev, 0, 4);
+       push = evo_wait(dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
                evo_data(push, mode_ctrl);
                evo_data(push, or_config);
-               evo_kick(push, encoder->dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 
        nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);
 
-               push = evo_wait(dev, 0, 4);
+               push = evo_wait(dev, EVO_MASTER, 4);
                if (push) {
                        evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
-                       evo_kick(push, dev, 0);
+                       evo_kick(push, dev, EVO_MASTER);
                }
 
+               nvd0_hdmi_disconnect(encoder);
+
                nv_encoder->crtc = NULL;
                nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
        }
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
        struct nvd0_display *disp = nvd0_display(dev);
        u32 intr = nv_rd32(dev, 0x610088);
 
+       if (intr & 0x00000001) {
+               u32 stat = nv_rd32(dev, 0x61008c);
+               nv_wr32(dev, 0x61008c, stat);
+               intr &= ~0x00000001;
+       }
+
        if (intr & 0x00000002) {
                u32 stat = nv_rd32(dev, 0x61009c);
                int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
 /******************************************************************************
  * Init
  *****************************************************************************/
-static void
+void
 nvd0_display_fini(struct drm_device *dev)
 {
        int i;
 
-       /* fini cursors */
-       for (i = 14; i >= 13; i--) {
-               if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
-                       continue;
-
-               nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
-               nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
-               nv_mask(dev, 0x610090, 1 << i, 0x00000000);
-               nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+       /* fini cursors + overlays + flips */
+       for (i = 1; i >= 0; i--) {
+               evo_fini_pio(dev, EVO_CURS(i));
+               evo_fini_pio(dev, EVO_OIMM(i));
+               evo_fini_dma(dev, EVO_OVLY(i));
+               evo_fini_dma(dev, EVO_FLIP(i));
        }
 
        /* fini master */
-       if (nv_rd32(dev, 0x610490) & 0x00000010) {
-               nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
-               nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
-               nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
-               nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
-               nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
-       }
+       evo_fini_dma(dev, EVO_MASTER);
 }
 
 int
 nvd0_display_init(struct drm_device *dev)
 {
        struct nvd0_display *disp = nvd0_display(dev);
+       int ret, i;
        u32 *push;
-       int i;
 
        if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
                nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
                nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
        }
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
                u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
                u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
                u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
        nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
 
        /* init master */
-       nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
-       nv_wr32(dev, 0x610498, 0x00010000);
-       nv_wr32(dev, 0x61049c, 0x00000001);
-       nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
-       nv_wr32(dev, 0x640000, 0x00000000);
-       nv_wr32(dev, 0x610490, 0x01000013);
-       if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
-               NV_ERROR(dev, "PDISP: master 0x%08x\n",
-                        nv_rd32(dev, 0x610490));
-               return -EBUSY;
+       ret = evo_init_dma(dev, EVO_MASTER);
+       if (ret)
+               goto error;
+
+       /* init flips + overlays + cursors */
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
+                   (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
+                   (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
+                   (ret = evo_init_pio(dev, EVO_CURS(i))))
+                       goto error;
        }
-       nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
-       nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
 
-       /* init cursors */
-       for (i = 13; i <= 14; i++) {
-               nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
-               if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
-                       NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
-                                nv_rd32(dev, 0x610490 + (i * 0x10)));
-                       return -EBUSY;
-               }
-
-               nv_mask(dev, 0x610090, 1 << i, 1 << i);
-               nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+       push = evo_wait(dev, EVO_MASTER, 32);
+       if (!push) {
+               ret = -EBUSY;
+               goto error;
        }
-
-       push = evo_wait(dev, 0, 32);
-       if (!push)
-               return -EBUSY;
        evo_mthd(push, 0x0088, 1);
        evo_data(push, NvEvoSync);
        evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
        evo_data(push, 0x80000000);
        evo_mthd(push, 0x008c, 1);
        evo_data(push, 0x00000000);
-       evo_kick(push, dev, 0);
+       evo_kick(push, dev, EVO_MASTER);
 
-       return 0;
+error:
+       if (ret)
+               nvd0_display_fini(dev);
+       return ret;
 }
 
 void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvd0_display *disp = nvd0_display(dev);
        struct pci_dev *pdev = dev->pdev;
+       int i;
 
-       nvd0_display_fini(dev);
+       for (i = 0; i < EVO_DMA_NR; i++) {
+               struct evo *evo = &disp->evo[i];
+               pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
+       }
 
-       pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
        nouveau_gpuobj_ref(NULL, &disp->mem);
+       nouveau_bo_unmap(disp->sync);
+       nouveau_bo_ref(NULL, &disp->sync);
        nouveau_irq_unregister(dev, 26);
 
        dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
        tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
        nouveau_irq_register(dev, 26, nvd0_display_intr);
 
+       /* small shared memory area we use for notifiers and semaphores */
+       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, &disp->sync);
+       if (!ret) {
+               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+               if (!ret)
+                       ret = nouveau_bo_map(disp->sync);
+               if (ret)
+                       nouveau_bo_ref(NULL, &disp->sync);
+       }
+
+       if (ret)
+               goto out;
+
        /* hash table and dma objects for the memory areas we care about */
        ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
        if (ret)
                goto out;
 
-       nv_wo32(disp->mem, 0x1000, 0x00000049);
-       nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
-       nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
-       nv_wo32(disp->mem, 0x100c, 0x00000000);
-       nv_wo32(disp->mem, 0x1010, 0x00000000);
-       nv_wo32(disp->mem, 0x1014, 0x00000000);
-       nv_wo32(disp->mem, 0x0000, NvEvoSync);
-       nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1020, 0x00000049);
-       nv_wo32(disp->mem, 0x1024, 0x00000000);
-       nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x102c, 0x00000000);
-       nv_wo32(disp->mem, 0x1030, 0x00000000);
-       nv_wo32(disp->mem, 0x1034, 0x00000000);
-       nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
-       nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1040, 0x00000009);
-       nv_wo32(disp->mem, 0x1044, 0x00000000);
-       nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x104c, 0x00000000);
-       nv_wo32(disp->mem, 0x1050, 0x00000000);
-       nv_wo32(disp->mem, 0x1054, 0x00000000);
-       nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
-       nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1060, 0x0fe00009);
-       nv_wo32(disp->mem, 0x1064, 0x00000000);
-       nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x106c, 0x00000000);
-       nv_wo32(disp->mem, 0x1070, 0x00000000);
-       nv_wo32(disp->mem, 0x1074, 0x00000000);
-       nv_wo32(disp->mem, 0x0018, NvEvoFB32);
-       nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
-
-       pinstmem->flush(dev);
+       /* create evo dma channels */
+       for (i = 0; i < EVO_DMA_NR; i++) {
+               struct evo *evo = &disp->evo[i];
+               u64 offset = disp->sync->bo.offset;
+               u32 dmao = 0x1000 + (i * 0x100);
+               u32 hash = 0x0000 + (i * 0x040);
+
+               evo->idx = i;
+               evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
+               evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
+               if (!evo->ptr) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-       /* push buffers for evo channels */
-       disp->evo[0].ptr =
-               pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
-       if (!disp->evo[0].ptr) {
-               ret = -ENOMEM;
-               goto out;
+               nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
+               nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
+               nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
+               nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
+               nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x00) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
+               nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
+               nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x20) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
+               nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
+               nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x40) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
+               nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
+               nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x60) << 9));
        }
 
-       ret = nvd0_display_init(dev);
-       if (ret)
-               goto out;
+       pinstmem->flush(dev);
 
 out:
        if (ret)
index 4c8796ba6dd8d3adf07f0760c436b6b55dfba7f9..6a5f4395838f712bc03bc0c70e9c34777ba37051 100644 (file)
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
        r128_PCI_IDS
 };
 
+static const struct file_operations r128_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = r128_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = r128_ioctls,
        .dma_ioctl = r128_cce_buffers,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-               .compat_ioctl = r128_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
-
+       .fops = &r128_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index cf8b4bc3e73ddfc6804beba647fb4a8b1988ccc6..2139fe893ec5621b86f4533ee7a1935477035f67 100644 (file)
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
        r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
        evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-       radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
+       radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
+       radeon_semaphore.o radeon_sa.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
 
-CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
+CFLAGS_radeon_trace_points.o := -I$(src)
index 14cc88aaf3a757163e8a5c7f371943f945e20912..d1bd239cd9e9eef5f88ef06e4964f158a7490986 100644 (file)
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
                udelay(count);
+       else if (!drm_can_sleep())
+               mdelay(count);
        else
                msleep(count);
 }
index 2b97262e3ab14af5af32b4d85711828563450b09..0fda830ef806eb6e5c9b346586a852e818609dd1 100644 (file)
@@ -554,7 +554,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (encoder->crtc == crtc) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        connector = radeon_get_connector_for_encoder(encoder);
-                       if (connector)
+                       if (connector && connector->display_info.bpc)
                                bpc = connector->display_info.bpc;
                        encoder_mode = atombios_get_encoder_mode(encoder);
                        if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -1184,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
        WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
@@ -1353,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
        WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
index 39c04c1b8472199da475db971b8b7b6078a91aad..f1f06ca9f1f533fc89179c5109a60ca9af4f5ebb 100644 (file)
@@ -409,8 +409,6 @@ int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +432,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else if (radeon_connector->use_digital)
+               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                   radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else if (radeon_connector->use_digital)
                        return ATOM_ENCODER_MODE_DVI;
                else
                        return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +443,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else
+               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                   radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +457,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else
+               else if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                        radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_eDP:
index 92c9628c572daa2495a685b0a7898273957ffc73..636660fca8c246f9e65205fcae1ca379e180131c 100644 (file)
@@ -40,6 +40,8 @@
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+                                    int ring, u32 cp_int_cntl);
 
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
@@ -1311,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
        /* set to DX10/11 mode */
-       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
        /* FIXME: implement */
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, ib->length_dw);
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
 
@@ -1360,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
        uint32_t cp_me;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
-       radeon_ring_write(rdev, 0x0);
-       radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
+       radeon_ring_write(ring, 0x0);
+       radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
 
-       r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+       r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
 
        /* setup clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
        for (i = 0; i < evergreen_default_size; i++)
-               radeon_ring_write(rdev, evergreen_default_state[i]);
+               radeon_ring_write(ring, evergreen_default_state[i]);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* Clear consts */
-       radeon_ring_write(rdev, 0xc0036f00);
-       radeon_ring_write(rdev, 0x00000bc4);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(ring, 0xc0036f00);
+       radeon_ring_write(ring, 0x00000bc4);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
 
-       radeon_ring_write(rdev, 0xc0026900);
-       radeon_ring_write(rdev, 0x00000316);
-       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       radeon_ring_write(rdev, 0x00000010); /*  */
+       radeon_ring_write(ring, 0xc0026900);
+       radeon_ring_write(ring, 0x00000316);
+       radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 
        return 0;
 }
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -1442,13 +1448,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        RREG32(GRBM_SOFT_RESET);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
        WREG32(CP_RB_CNTL, tmp);
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1456,8 +1462,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB_WPTR, ring->wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1481,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
-       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       ring->rptr = RREG32(CP_RB_RPTR);
 
        evergreen_cp_start(rdev);
-       rdev->cp.ready = true;
-       r = radeon_ring_test(rdev);
+       ring->ready = true;
+       r = radeon_ring_test(rdev, ring);
        if (r) {
-               rdev->cp.ready = false;
+               ring->ready = false;
                return r;
        }
        return 0;
@@ -2353,7 +2359,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
        return 0;
 }
 
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -2366,19 +2372,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2470,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
        u32 tmp;
 
-       WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0,
+                                        CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+               cayman_cp_int_cntl_setup(rdev, 1, 0);
+               cayman_cp_int_cntl_setup(rdev, 2, 0);
+       } else
+               WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2515,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 int evergreen_irq_set(struct radeon_device *rdev)
 {
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+       u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
@@ -2539,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
-       if (rdev->irq.sw_int) {
-               DRM_DEBUG("evergreen_irq_set: sw int\n");
-               cp_int_cntl |= RB_INT_ENABLE;
-               cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+       if (rdev->family >= CHIP_CAYMAN) {
+               /* enable CP interrupts on all rings */
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+                       cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+                       cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+               }
+       } else {
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= RB_INT_ENABLE;
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
        }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            rdev->irq.pflip[0]) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2603,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
                grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
        }
 
-       WREG32(CP_INT_CNTL, cp_int_cntl);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+               cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+               cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+       } else
+               WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3018,11 +3053,24 @@ restart_ih:
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
-                       radeon_fence_process(rdev);
+                       if (rdev->family >= CHIP_CAYMAN) {
+                               switch (src_data) {
+                               case 0:
+                                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+                                       break;
+                               case 1:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+                                       break;
+                               case 2:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+                                       break;
+                               }
+                       } else
+                               radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
@@ -3052,6 +3100,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -3106,6 +3155,12 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -3115,7 +3170,9 @@ static int evergreen_startup(struct radeon_device *rdev)
        }
        evergreen_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = evergreen_cp_load_microcode(rdev);
@@ -3125,6 +3182,22 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -3144,31 +3217,30 @@ int evergreen_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
                DRM_ERROR("evergreen startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        return r;
 
 }
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        r700_cp_stop(rdev);
-       rdev->cp.ready = false;
+       ring->ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -3243,8 +3315,8 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -3253,29 +3325,24 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = evergreen_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-       }
 
        /* Don't start up if the MC ucode is missing on BTC parts.
         * The default clocks and voltages before the MC ucode
@@ -3293,15 +3360,17 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+       r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
index 914e5af84163cc38b9e03e214af13db747b7dd80..2379849515c71f7d9de4b240d159df59dba39718 100644 (file)
@@ -49,6 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cb_color_info;
        int pitch, slice;
 
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
-       radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, pitch);
-       radeon_ring_write(rdev, slice);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, cb_color_info);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+       radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, pitch);
+       radeon_ring_write(ring, slice);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, cb_color_info);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cp_coher_size;
 
        if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
                 * to the RB directly. For IBs, the CP programs this as part of the
                 * surface_sync packet.
                 */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(rdev, sync_type);
-       radeon_ring_write(rdev, cp_coher_size);
-       radeon_ring_write(rdev, mc_addr >> 8);
-       radeon_ring_write(rdev, 10); /* poll interval */
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, sync_type);
+       radeon_ring_write(ring, cp_coher_size);
+       radeon_ring_write(ring, mc_addr >> 8);
+       radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 11dw + 1 surface sync = 16dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u64 gpu_addr;
 
        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
-       radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+       radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, 2);
+       radeon_ring_write(ring, 0);
 
        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
-       radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, 1);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+       radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, 1);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 2);
 
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
        /* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
                SQ_VTCX_SEL_Z(SQ_SEL_Z) |
                SQ_VTCX_SEL_W(SQ_SEL_W);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-       radeon_ring_write(rdev, 0x580);
-       radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-       radeon_ring_write(rdev, 48 - 1); /* size */
-       radeon_ring_write(rdev, sq_vtx_constant_word2);
-       radeon_ring_write(rdev, sq_vtx_constant_word3);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(ring, 0x580);
+       radeon_ring_write(ring, gpu_addr & 0xffffffff);
+       radeon_ring_write(ring, 48 - 1); /* size */
+       radeon_ring_write(ring, sq_vtx_constant_word2);
+       radeon_ring_write(ring, sq_vtx_constant_word3);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
 
        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr, u32 size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_tex_resource_word0, sq_tex_resource_word1;
        u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
        cp_set_surface_sync(rdev,
                            PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word0);
-       radeon_ring_write(rdev, sq_tex_resource_word1);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, sq_tex_resource_word4);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word7);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word0);
+       radeon_ring_write(ring, sq_tex_resource_word1);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, sq_tex_resource_word4);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word7);
 }
 
 /* emits 12 */
@@ -225,6 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        /* workaround some hw bugs */
        if (x2 == 0)
                x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
                        x2 = 2;
        }
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(rdev, DI_PT_RECTLIST);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, DI_PT_RECTLIST);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 2) |
 #endif
                          DI_INDEX_SIZE_16_BIT);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+       radeon_ring_write(ring, 1);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-       radeon_ring_write(rdev, 3);
-       radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+       radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+       radeon_ring_write(ring, 3);
+       radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
        u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
        u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
        int dwords;
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        if (rdev->family < CHIP_CAYMAN) {
                switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
                                            NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
                /* disable dyn gprs */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0);
 
                /* setup LDS */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0x10001000);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0x10001000);
 
                /* SQ config */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-               radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, sq_config);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, sq_thread_resource_mgmt);
-               radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+               radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, sq_config);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, sq_thread_resource_mgmt);
+               radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_3);
        }
 
        /* CONTEXT_CONTROL */
-       radeon_ring_write(rdev, 0xc0012800);
-       radeon_ring_write(rdev, 0x80000000);
-       radeon_ring_write(rdev, 0x80000000);
+       radeon_ring_write(ring, 0xc0012800);
+       radeon_ring_write(ring, 0x80000000);
+       radeon_ring_write(ring, 0x80000000);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* SET_SAMPLER */
-       radeon_ring_write(rdev, 0xc0036e00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000012);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0036e00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000012);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* set to DX10/11 mode */
-       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
 
        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
-       radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, dwords);
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+       radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+       radeon_ring_write(ring, dwords);
 
 }
 
index cd4590aae154154d62906d901a63b8b368fea861..f7442e62c03f676c42afc518eba2be44400c43da 100644 (file)
@@ -520,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                break;
        case DB_Z_INFO:
                track->db_z_info = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -649,7 +649,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR7_INFO:
                tmp = (reg - CB_COLOR0_INFO) / 0x3c;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -666,7 +666,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR11_INFO:
                tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1355,7 +1355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (!p->keep_tiling_flags) {
+                               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                                        ib[idx+1+(i*8)+1] |=
                                                TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
@@ -1572,3 +1572,241 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
        return 0;
 }
 
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+       /* context regs are fine */
+       if (reg >= 0x28000)
+               return true;
+
+       /* check config regs */
+       switch (reg) {
+       case GRBM_GFX_INDEX:
+       case VGT_VTX_VECT_EJECT_REG:
+       case VGT_CACHE_INVALIDATION:
+       case VGT_GS_VERTEX_REUSE:
+       case VGT_PRIMITIVE_TYPE:
+       case VGT_INDEX_TYPE:
+       case VGT_NUM_INDICES:
+       case VGT_NUM_INSTANCES:
+       case VGT_COMPUTE_DIM_X:
+       case VGT_COMPUTE_DIM_Y:
+       case VGT_COMPUTE_DIM_Z:
+       case VGT_COMPUTE_START_X:
+       case VGT_COMPUTE_START_Y:
+       case VGT_COMPUTE_START_Z:
+       case VGT_COMPUTE_INDEX:
+       case VGT_COMPUTE_THREAD_GROUP_SIZE:
+       case VGT_HS_OFFCHIP_PARAM:
+       case PA_CL_ENHANCE:
+       case PA_SU_LINE_STIPPLE_VALUE:
+       case PA_SC_LINE_STIPPLE_STATE:
+       case PA_SC_ENHANCE:
+       case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+       case SQ_DYN_GPR_SIMD_LOCK_EN:
+       case SQ_CONFIG:
+       case SQ_GPR_RESOURCE_MGMT_1:
+       case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+       case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+       case SQ_CONST_MEM_BASE:
+       case SQ_STATIC_THREAD_MGMT_1:
+       case SQ_STATIC_THREAD_MGMT_2:
+       case SQ_STATIC_THREAD_MGMT_3:
+       case SPI_CONFIG_CNTL:
+       case SPI_CONFIG_CNTL_1:
+       case TA_CNTL_AUX:
+       case DB_DEBUG:
+       case DB_DEBUG2:
+       case DB_DEBUG3:
+       case DB_DEBUG4:
+       case DB_WATERMARKS:
+       case TD_PS_BORDER_COLOR_INDEX:
+       case TD_PS_BORDER_COLOR_RED:
+       case TD_PS_BORDER_COLOR_GREEN:
+       case TD_PS_BORDER_COLOR_BLUE:
+       case TD_PS_BORDER_COLOR_ALPHA:
+       case TD_VS_BORDER_COLOR_INDEX:
+       case TD_VS_BORDER_COLOR_RED:
+       case TD_VS_BORDER_COLOR_GREEN:
+       case TD_VS_BORDER_COLOR_BLUE:
+       case TD_VS_BORDER_COLOR_ALPHA:
+       case TD_GS_BORDER_COLOR_INDEX:
+       case TD_GS_BORDER_COLOR_RED:
+       case TD_GS_BORDER_COLOR_GREEN:
+       case TD_GS_BORDER_COLOR_BLUE:
+       case TD_GS_BORDER_COLOR_ALPHA:
+       case TD_HS_BORDER_COLOR_INDEX:
+       case TD_HS_BORDER_COLOR_RED:
+       case TD_HS_BORDER_COLOR_GREEN:
+       case TD_HS_BORDER_COLOR_BLUE:
+       case TD_HS_BORDER_COLOR_ALPHA:
+       case TD_LS_BORDER_COLOR_INDEX:
+       case TD_LS_BORDER_COLOR_RED:
+       case TD_LS_BORDER_COLOR_GREEN:
+       case TD_LS_BORDER_COLOR_BLUE:
+       case TD_LS_BORDER_COLOR_ALPHA:
+       case TD_CS_BORDER_COLOR_INDEX:
+       case TD_CS_BORDER_COLOR_RED:
+       case TD_CS_BORDER_COLOR_GREEN:
+       case TD_CS_BORDER_COLOR_BLUE:
+       case TD_CS_BORDER_COLOR_ALPHA:
+       case SQ_ESGS_RING_SIZE:
+       case SQ_GSVS_RING_SIZE:
+       case SQ_ESTMP_RING_SIZE:
+       case SQ_GSTMP_RING_SIZE:
+       case SQ_HSTMP_RING_SIZE:
+       case SQ_LSTMP_RING_SIZE:
+       case SQ_PSTMP_RING_SIZE:
+       case SQ_VSTMP_RING_SIZE:
+       case SQ_ESGS_RING_ITEMSIZE:
+       case SQ_ESTMP_RING_ITEMSIZE:
+       case SQ_GSTMP_RING_ITEMSIZE:
+       case SQ_GSVS_RING_ITEMSIZE:
+       case SQ_GS_VERT_ITEMSIZE:
+       case SQ_GS_VERT_ITEMSIZE_1:
+       case SQ_GS_VERT_ITEMSIZE_2:
+       case SQ_GS_VERT_ITEMSIZE_3:
+       case SQ_GSVS_RING_OFFSET_1:
+       case SQ_GSVS_RING_OFFSET_2:
+       case SQ_GSVS_RING_OFFSET_3:
+       case SQ_HSTMP_RING_ITEMSIZE:
+       case SQ_LSTMP_RING_ITEMSIZE:
+       case SQ_PSTMP_RING_ITEMSIZE:
+       case SQ_VSTMP_RING_ITEMSIZE:
+       case VGT_TF_RING_SIZE:
+       case SQ_ESGS_RING_BASE:
+       case SQ_GSVS_RING_BASE:
+       case SQ_ESTMP_RING_BASE:
+       case SQ_GSTMP_RING_BASE:
+       case SQ_HSTMP_RING_BASE:
+       case SQ_LSTMP_RING_BASE:
+       case SQ_PSTMP_RING_BASE:
+       case SQ_VSTMP_RING_BASE:
+       case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+       case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+                                     u32 *ib, struct radeon_cs_packet *pkt)
+{
+       u32 idx = pkt->idx + 1;
+       u32 idx_value = ib[idx];
+       u32 start_reg, end_reg, reg, i;
+
+       switch (pkt->opcode) {
+       case PACKET3_NOP:
+       case PACKET3_SET_BASE:
+       case PACKET3_CLEAR_STATE:
+       case PACKET3_INDEX_BUFFER_SIZE:
+       case PACKET3_DISPATCH_DIRECT:
+       case PACKET3_DISPATCH_INDIRECT:
+       case PACKET3_MODE_CONTROL:
+       case PACKET3_SET_PREDICATION:
+       case PACKET3_COND_EXEC:
+       case PACKET3_PRED_EXEC:
+       case PACKET3_DRAW_INDIRECT:
+       case PACKET3_DRAW_INDEX_INDIRECT:
+       case PACKET3_INDEX_BASE:
+       case PACKET3_DRAW_INDEX_2:
+       case PACKET3_CONTEXT_CONTROL:
+       case PACKET3_DRAW_INDEX_OFFSET:
+       case PACKET3_INDEX_TYPE:
+       case PACKET3_DRAW_INDEX:
+       case PACKET3_DRAW_INDEX_AUTO:
+       case PACKET3_DRAW_INDEX_IMMD:
+       case PACKET3_NUM_INSTANCES:
+       case PACKET3_DRAW_INDEX_MULTI_AUTO:
+       case PACKET3_STRMOUT_BUFFER_UPDATE:
+       case PACKET3_DRAW_INDEX_OFFSET_2:
+       case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+       case PACKET3_MPEG_INDEX:
+       case PACKET3_WAIT_REG_MEM:
+       case PACKET3_MEM_WRITE:
+       case PACKET3_SURFACE_SYNC:
+       case PACKET3_EVENT_WRITE:
+       case PACKET3_EVENT_WRITE_EOP:
+       case PACKET3_EVENT_WRITE_EOS:
+       case PACKET3_SET_CONTEXT_REG:
+       case PACKET3_SET_BOOL_CONST:
+       case PACKET3_SET_LOOP_CONST:
+       case PACKET3_SET_RESOURCE:
+       case PACKET3_SET_SAMPLER:
+       case PACKET3_SET_CTL_CONST:
+       case PACKET3_SET_RESOURCE_OFFSET:
+       case PACKET3_SET_CONTEXT_REG_INDIRECT:
+       case PACKET3_SET_RESOURCE_INDIRECT:
+       case CAYMAN_PACKET3_DEALLOC_STATE:
+               break;
+       case PACKET3_COND_WRITE:
+               if (idx_value & 0x100) {
+                       reg = ib[idx + 5] * 4;
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       case PACKET3_COPY_DW:
+               if (idx_value & 0x2) {
+                       reg = ib[idx + 3] * 4;
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       case PACKET3_SET_CONFIG_REG:
+               start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+               end_reg = 4 * pkt->count + start_reg - 4;
+               if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+                   (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+                   (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+                       DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+                       return -EINVAL;
+               }
+               for (i = 0; i < pkt->count; i++) {
+                       reg = start_reg + (4 * i);
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       int ret = 0;
+       u32 idx = 0;
+       struct radeon_cs_packet pkt;
+
+       do {
+               pkt.idx = idx;
+               pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+               pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+               pkt.one_reg_wr = 0;
+               switch (pkt.type) {
+               case PACKET_TYPE0:
+                       dev_err(rdev->dev, "Packet0 not allowed!\n");
+                       ret = -EINVAL;
+                       break;
+               case PACKET_TYPE2:
+                       break;
+               case PACKET_TYPE3:
+                       pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+                       ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+                       break;
+               default:
+                       dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+                       ret = -EINVAL;
+                       break;
+               }
+               if (ret)
+                       break;
+               idx += pkt.count + 2;
+       } while (idx < ib->length_dw);
+
+       return ret;
+}
index 7d7f2155e34c305729f8487c55fc8c2c741ccc44..4215de95477e4a863c83183d90133c9d07358ee4 100644 (file)
 #define EVERGREEN_P1PLL_SS_CNTL                         0x414
 #define EVERGREEN_P2PLL_SS_CNTL                         0x454
 #       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL                       0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV                       0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK                       0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE                         0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID                      0x5ec0
+
 /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
 #define EVERGREEN_GRPH_ENABLE                           0x6800
 #define EVERGREEN_GRPH_CONTROL                          0x6804
 #define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
 #define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
 
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE                            0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET                   0xf0
+
 #endif
index e00039e59a75b2bceb45ff69f63b740fd3193f40..b502216d42afdc4638362bceec578adfdf998720 100644 (file)
 #define        PA_CL_ENHANCE                                   0x8A14
 #define                CLIP_VTX_REORDER_ENA                            (1 << 0)
 #define                NUM_CLIP_SEQ(x)                                 ((x) << 1)
+#define        PA_SC_ENHANCE                                   0x8BF0
 #define PA_SC_AA_CONFIG                                        0x28C04
 #define         MSAA_NUM_SAMPLES_SHIFT                  0
 #define         MSAA_NUM_SAMPLES_MASK                   0x3
 #define        SQ_GPR_RESOURCE_MGMT_3                          0x8C0C
 #define                NUM_HS_GPRS(x)                                  ((x) << 0)
 #define                NUM_LS_GPRS(x)                                  ((x) << 16)
+#define        SQ_GLOBAL_GPR_RESOURCE_MGMT_1                   0x8C10
+#define        SQ_GLOBAL_GPR_RESOURCE_MGMT_2                   0x8C14
 #define        SQ_THREAD_RESOURCE_MGMT                         0x8C18
 #define                NUM_PS_THREADS(x)                               ((x) << 0)
 #define                NUM_VS_THREADS(x)                               ((x) << 8)
 #define                NUM_HS_STACK_ENTRIES(x)                         ((x) << 0)
 #define                NUM_LS_STACK_ENTRIES(x)                         ((x) << 16)
 #define        SQ_DYN_GPR_CNTL_PS_FLUSH_REQ                    0x8D8C
+#define        SQ_DYN_GPR_SIMD_LOCK_EN                         0x8D94
+#define        SQ_STATIC_THREAD_MGMT_1                         0x8E20
+#define        SQ_STATIC_THREAD_MGMT_2                         0x8E24
+#define        SQ_STATIC_THREAD_MGMT_3                         0x8E28
 #define        SQ_LDS_RESOURCE_MGMT                            0x8E2C
 
 #define        SQ_MS_FIFO_SIZES                                0x8CF0
 #define        PACKET3_DRAW_INDEX_MULTI_ELEMENT                0x36
 #define        PACKET3_MEM_SEMAPHORE                           0x39
 #define        PACKET3_MPEG_INDEX                              0x3A
+#define        PACKET3_COPY_DW                                 0x3B
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
 #define                        SQ_TEX_VTX_VALID_TEXTURE                        0x2
 #define                        SQ_TEX_VTX_VALID_BUFFER                         0x3
 
+#define VGT_VTX_VECT_EJECT_REG                         0x88b0
+
 #define SQ_CONST_MEM_BASE                              0x8df8
 
 #define SQ_ESGS_RING_BASE                              0x8c40
 #define PA_SC_SCREEN_SCISSOR_TL                         0x28030
 #define PA_SC_GENERIC_SCISSOR_TL                        0x28240
 #define PA_SC_WINDOW_SCISSOR_TL                         0x28204
-#define VGT_PRIMITIVE_TYPE                              0x8958
 
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define VGT_INDEX_TYPE                                  0x895C
+
+#define VGT_NUM_INDICES                                 0x8970
+
+#define VGT_COMPUTE_DIM_X                               0x8990
+#define VGT_COMPUTE_DIM_Y                               0x8994
+#define VGT_COMPUTE_DIM_Z                               0x8998
+#define VGT_COMPUTE_START_X                             0x899C
+#define VGT_COMPUTE_START_Y                             0x89A0
+#define VGT_COMPUTE_START_Z                             0x89A4
+#define VGT_COMPUTE_INDEX                               0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
+#define VGT_HS_OFFCHIP_PARAM                            0x89B0
+
+#define DB_DEBUG                                       0x9830
+#define DB_DEBUG2                                      0x9834
+#define DB_DEBUG3                                      0x9838
+#define DB_DEBUG4                                      0x983C
+#define DB_WATERMARKS                                  0x9854
 #define DB_DEPTH_CONTROL                               0x28800
 #define DB_DEPTH_VIEW                                  0x28008
 #define DB_HTILE_DATA_BASE                             0x28014
 #define SQ_VTX_CONSTANT_WORD6_0                         0x30018
 #define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
 
+#define TD_PS_BORDER_COLOR_INDEX                        0xA400
+#define TD_PS_BORDER_COLOR_RED                          0xA404
+#define TD_PS_BORDER_COLOR_GREEN                        0xA408
+#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
+#define TD_VS_BORDER_COLOR_INDEX                        0xA414
+#define TD_VS_BORDER_COLOR_RED                          0xA418
+#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
+#define TD_VS_BORDER_COLOR_BLUE                         0xA420
+#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
+#define TD_GS_BORDER_COLOR_INDEX                        0xA428
+#define TD_GS_BORDER_COLOR_RED                          0xA42C
+#define TD_GS_BORDER_COLOR_GREEN                        0xA430
+#define TD_GS_BORDER_COLOR_BLUE                         0xA434
+#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
+#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
+#define TD_HS_BORDER_COLOR_RED                          0xA440
+#define TD_HS_BORDER_COLOR_GREEN                        0xA444
+#define TD_HS_BORDER_COLOR_BLUE                         0xA448
+#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
+#define TD_LS_BORDER_COLOR_INDEX                        0xA450
+#define TD_LS_BORDER_COLOR_RED                          0xA454
+#define TD_LS_BORDER_COLOR_GREEN                        0xA458
+#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
+#define TD_CS_BORDER_COLOR_INDEX                        0xA464
+#define TD_CS_BORDER_COLOR_RED                          0xA468
+#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
+#define TD_CS_BORDER_COLOR_BLUE                         0xA470
+#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
+
 /* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE                    0x89B0
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE                    0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS                 0x8E48
 #define CAYMAN_DB_EQAA                                 0x28804
 #define CAYMAN_DB_DEPTH_INFO                           0x2803C
 #define CAYMAN_PA_SC_AA_CONFIG                         0x28BE0
index 0e579985746535d211d26bc1350bdf60e851bdb8..32113729540069f22ce31ffa18e23c7ccc290159 100644 (file)
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
 
 int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
-       int r;
+       int i, r;
 
        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
                return r;
        radeon_gart_restore(rdev);
        /* Setup TLB control */
-       WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+       WREG32(MC_VM_MX_L1_TLB_CNTL,
+              (0xA << 7) |
+              ENABLE_L1_TLB |
               ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              ENABLE_ADVANCED_DRIVER_MODEL |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(VM_CONTEXT0_CNTL2, 0);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-       /* disable context1-7 */
+
+       WREG32(0x15D4, 0);
+       WREG32(0x15D8, 0);
+       WREG32(0x15DC, 0);
+
+       /* empty context1-7 */
+       for (i = 1; i < 8; i++) {
+               WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+               WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+               WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+                       rdev->gart.table_addr >> 12);
+       }
+
+       /* enable context1-7 */
+       WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+              (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT1_CNTL2, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
 }
 
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+                             int ring, u32 cp_int_cntl)
+{
+       u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+       WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+       WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
 /*
  * CP.
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+                           struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       /* flush read cache over gart for this vmid */
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, 0xFFFFFFFF);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 10); /* poll interval */
+       /* EVENT_WRITE_EOP - flush caches, send int */
+       radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+       radeon_ring_write(ring, addr & 0xffffffff);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+       /* set to DX10/11 mode */
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+       /* flush read cache over gart for this vmid */
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, ib->vm_id);
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, 0xFFFFFFFF);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 10); /* poll interval */
+}
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
        if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
-       radeon_ring_write(rdev, 0x0);
-       radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
+       radeon_ring_write(ring, 0x0);
+       radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cayman_cp_enable(rdev, true);
 
-       r = radeon_ring_lock(rdev, cayman_default_size + 19);
+       r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
 
        /* setup clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
        for (i = 0; i < cayman_default_size; i++)
-               radeon_ring_write(rdev, cayman_default_state[i]);
+               radeon_ring_write(ring, cayman_default_state[i]);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* Clear consts */
-       radeon_ring_write(rdev, 0xc0036f00);
-       radeon_ring_write(rdev, 0x00000bc4);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(ring, 0xc0036f00);
+       radeon_ring_write(ring, 0x00000bc4);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
 
-       radeon_ring_write(rdev, 0xc0026900);
-       radeon_ring_write(rdev, 0x00000316);
-       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       radeon_ring_write(rdev, 0x00000010); /*  */
+       radeon_ring_write(ring, 0xc0026900);
+       radeon_ring_write(ring, 0x00000316);
+       radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 
        /* XXX init other rings */
 
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
        cayman_cp_enable(rdev, false);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring;
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
        RREG32(GRBM_SOFT_RESET);
 
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB0_WPTR, ring->wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB0_CNTL, tmp);
 
-       WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+       ring->rptr = RREG32(CP_RB0_RPTR);
 
        /* ring1  - compute only */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp1.wptr = 0;
-       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB1_WPTR, ring->wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB1_CNTL, tmp);
 
-       WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+       WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+       ring->rptr = RREG32(CP_RB1_RPTR);
 
        /* ring2 - compute only */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp2.wptr = 0;
-       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB2_WPTR, ring->wptr);
 
        /* set the wb address wether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB2_CNTL, tmp);
 
-       WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+       WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+       ring->rptr = RREG32(CP_RB2_RPTR);
 
        /* start the rings */
        cayman_cp_start(rdev);
-       rdev->cp.ready = true;
-       rdev->cp1.ready = true;
-       rdev->cp2.ready = true;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+       rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+       rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
        /* this only test cp0 */
-       r = radeon_ring_test(rdev);
+       r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        if (r) {
-               rdev->cp.ready = false;
-               rdev->cp1.ready = false;
-               rdev->cp2.ready = false;
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
                return r;
        }
 
        return 0;
 }
 
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
        /* XXX deal with CP0,1,2 */
-       rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
                RREG32(GRBM_STATUS_SE1));
        dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(SRBM_STATUS));
+       dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(0x14F8));
+       dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(0x14D8));
+       dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(0x14FC));
+       dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(0x14DC));
+
        evergreen_mc_stop(rdev, &save);
        if (evergreen_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
        (void)RREG32(GRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
        udelay(50);
+
        dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(GRBM_STATUS));
        dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
        }
        evergreen_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            CP_RB0_RPTR, CP_RB0_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
+       r = radeon_vm_manager_start(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = cayman_startup(rdev);
        if (r) {
                DRM_ERROR("cayman startup failed on resume\n");
                return r;
        }
-
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-               return r;
-       }
-
        return r;
-
 }
 
 int cayman_suspend(struct radeon_device *rdev)
 {
        /* FIXME: we should wait for ring to be empty */
+       radeon_ib_pool_suspend(rdev);
+       radeon_vm_manager_suspend(rdev);
+       r600_blit_suspend(rdev);
        cayman_cp_enable(rdev, false);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
-
        return 0;
 }
 
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* This don't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+       r = radeon_vm_manager_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+       }
+
        r = cayman_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                cayman_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               radeon_vm_manager_fini(rdev);
                radeon_irq_kms_fini(rdev);
                cayman_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-       }
 
        /* Don't start up if the MC ucode is missing.
         * The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       radeon_vm_manager_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
        rdev->bios = NULL;
 }
 
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+       /* number of VMs */
+       rdev->vm_manager.nvm = 8;
+       /* base offset of vram pages */
+       rdev->vm_manager.vram_base_offset = 0;
+       return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+       return 0;
+}
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       if (vm->id == -1)
+               return;
+
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+                             struct radeon_vm *vm,
+                             uint32_t flags)
+{
+       uint32_t r600_flags = 0;
+
+       r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+       r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+       r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+       if (flags & RADEON_VM_PAGE_SYSTEM) {
+               r600_flags |= R600_PTE_SYSTEM;
+               r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+       }
+       return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags)
+{
+       void __iomem *ptr = (void *)vm->pt;
+
+       addr = addr & 0xFFFFFFFFFFFFF000ULL;
+       addr |= flags;
+       writeq(addr, ptr + (pfn * 8));
+}
index 4672869cdb265f0d4f0bd5b58ea99ee5afa761ec..f9df2a645e79caeeab62396e71222b89026e6f8e 100644 (file)
@@ -42,6 +42,9 @@
 #define CAYMAN_MAX_TCC_MASK          0xFF
 
 #define DMIF_ADDR_CONFIG                               0xBD4
+#define        SRBM_GFX_CNTL                                   0x0E44
+#define                RINGID(x)                                       (((x) & 0x3) << 0)
+#define                VMID(x)                                         (((x) & 0x7) << 0)
 #define        SRBM_STATUS                                     0x0E50
 
 #define VM_CONTEXT0_REQUEST_RESPONSE                   0x1470
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 #define        CP_SEM_WAIT_TIMER                               0x85BC
+#define        CP_COHER_CNTL2                                  0x85E8
 #define CP_ME_CNTL                                     0x86D8
 #define                CP_ME_HALT                                      (1 << 28)
 #define                CP_PFP_HALT                                     (1 << 26)
 #define        CP_RB0_RPTR_ADDR                                0xC10C
 #define        CP_RB0_RPTR_ADDR_HI                             0xC110
 #define        CP_RB0_WPTR                                     0xC114
+
+#define CP_INT_CNTL                                     0xC124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+
 #define        CP_RB1_BASE                                     0xC180
 #define        CP_RB1_CNTL                                     0xC184
 #define        CP_RB1_RPTR_ADDR                                0xC188
 #define        CP_ME_RAM_DATA                                  0xC160
 #define        CP_DEBUG                                        0xC1FC
 
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
 /*
  * PM4
  */
 #define        PACKET3_DISPATCH_DIRECT                         0x15
 #define        PACKET3_DISPATCH_INDIRECT                       0x16
 #define        PACKET3_INDIRECT_BUFFER_END                     0x17
+#define        PACKET3_MODE_CONTROL                            0x18
 #define        PACKET3_SET_PREDICATION                         0x20
 #define        PACKET3_REG_RMW                                 0x21
 #define        PACKET3_COND_EXEC                               0x22
 #define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define        PACKET3_COND_WRITE                              0x45
 #define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                * 1 - ZPASS_DONE
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - TS events
+                */
 #define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
 #define        PACKET3_EVENT_WRITE_EOS                         0x48
 #define        PACKET3_PREAMBLE_CNTL                           0x4A
 #              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
index bfc08f6320f83b83569bec08a2968014c9e90a4f..3ec81c3d5108ce24163b00da4f42dbf4df83d7f6 100644 (file)
@@ -667,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                tmp |= RADEON_SW_INT_ENABLE;
        }
        if (rdev->irq.gui_idle) {
@@ -739,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
        while (status) {
                /* SW interrupt */
                if (status & RADEON_SW_INT_TEST) {
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                }
                /* gui idle interrupt */
                if (status & RADEON_GUI_IDLE_STAT) {
@@ -811,25 +811,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        /* We have to make sure that caches are flushed before
         * CPU might read something from VRAM. */
-       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
-       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
        /* Wait until IDLE & CLEAN */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
-       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
-       radeon_ring_write(rdev, fence->seq);
-       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
-       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+       radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *ring,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait)
+{
+       /* Unused on older asics, since we don't have semaphores or multiple rings */
+       BUG();
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
@@ -838,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
                   unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t cur_pages;
        uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
@@ -855,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
-       r = radeon_ring_lock(rdev, ndw);
+       r = radeon_ring_lock(rdev, ring, ndw);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
@@ -869,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 
                /* pages are in Y direction - height
                   page width in X direction - width */
-               radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
-               radeon_ring_write(rdev,
+               radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+               radeon_ring_write(ring,
                                  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_SRC_CLIPPING |
@@ -882,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
                                  RADEON_DP_SRC_SOURCE_MEMORY |
                                  RADEON_GMC_CLR_CMP_CNTL_DIS |
                                  RADEON_GMC_WR_MSK_DIS);
-               radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
-               radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
-               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_gpu_pages);
-               radeon_ring_write(rdev, num_gpu_pages);
-               radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
-       }
-       radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+               radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+               radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+               radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(ring, num_gpu_pages);
+               radeon_ring_write(ring, num_gpu_pages);
+               radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+       }
+       radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return r;
 }
 
@@ -922,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 
 void r100_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 
@@ -1035,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        unsigned rb_bufsz;
        unsigned rb_blksz;
        unsigned max_fetch;
@@ -1060,7 +1074,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
-       r = radeon_ring_init(rdev, ring_size);
+       r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+                            0, 0x7fffff, RADEON_CP_PACKET2);
        if (r) {
                return r;
        }
@@ -1069,7 +1085,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        rb_blksz = 9;
        /* cp will read 128bytes at a time (4 dwords) */
        max_fetch = 1;
-       rdev->cp.align_mask = 16 - 1;
+       ring->align_mask = 16 - 1;
        /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
        pre_write_timer = 64;
        /* Force CP_RB_WPTR write if written more than one time before the
@@ -1099,13 +1115,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
        /* Set ring address */
-       DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
-       WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+       DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+       WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1121,7 +1137,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       ring->rptr = RREG32(RADEON_CP_RB_RPTR);
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1130,12 +1146,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
        WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
        radeon_ring_start(rdev);
-       r = radeon_ring_test(rdev);
+       r = radeon_ring_test(rdev, ring);
        if (r) {
                DRM_ERROR("radeon: cp isn't working (%d).\n", r);
                return r;
        }
-       rdev->cp.ready = true;
+       ring->ready = true;
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
 }
@@ -1147,7 +1163,7 @@ void r100_cp_fini(struct radeon_device *rdev)
        }
        /* Disable ring */
        r100_cp_disable(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        DRM_INFO("radeon: cp finalized\n");
 }
 
@@ -1155,7 +1171,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
        /* Disable ring */
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1165,13 +1181,6 @@ void r100_cp_disable(struct radeon_device *rdev)
        }
 }
 
-void r100_cp_commit(struct radeon_device *rdev)
-{
-       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-       (void)RREG32(RADEON_CP_RB_WPTR);
-}
-
-
 /*
  * CS functions
  */
@@ -2099,9 +2108,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
-       lockup->last_cp_rptr = cp->rptr;
+       lockup->last_cp_rptr = ring->rptr;
        lockup->last_jiffies = jiffies;
 }
 
@@ -2126,20 +2135,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
  * false positive when CP is just gived nothing to do.
  *
  **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
        unsigned long cjiffies, elapsed;
 
        cjiffies = jiffies;
        if (!time_after(cjiffies, lockup->last_jiffies)) {
                /* likely a wrap around */
-               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_cp_rptr = ring->rptr;
                lockup->last_jiffies = jiffies;
                return false;
        }
-       if (cp->rptr != lockup->last_cp_rptr) {
+       if (ring->rptr != lockup->last_cp_rptr) {
                /* CP is still working no lockup */
-               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_cp_rptr = ring->rptr;
                lockup->last_jiffies = jiffies;
                return false;
        }
@@ -2152,31 +2161,32 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
        return false;
 }
 
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 rbbm_status;
        int r;
 
        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-               r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+               r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
 }
 
 void r100_bm_disable(struct radeon_device *rdev)
 {
        u32 tmp;
+       u16 tmp16;
 
        /* disable bus mastering */
        tmp = RREG32(R_000030_BUS_CNTL);
@@ -2187,8 +2197,8 @@ void r100_bm_disable(struct radeon_device *rdev)
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
        tmp = RREG32(RADEON_BUS_CNTL);
        mdelay(1);
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
-       pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+       pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
        mdelay(1);
 }
 
@@ -2579,21 +2589,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t rdp, wdp;
        unsigned count, i, j;
 
-       radeon_ring_free_size(rdev);
+       radeon_ring_free_size(rdev, ring);
        rdp = RREG32(RADEON_CP_RB_RPTR);
        wdp = RREG32(RADEON_CP_RB_WPTR);
-       count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+       count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
        seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
        seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
        seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
-       seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+       seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        for (j = 0; j <= count; j++) {
-               i = (rdp + j) & rdev->cp.ptr_mask;
-               seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+               i = (rdp + j) & ring->ptr_mask;
+               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
        }
        return 0;
 }
@@ -3635,7 +3646,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
        }
 }
 
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        uint32_t scratch;
        uint32_t tmp = 0;
@@ -3648,15 +3659,15 @@ int r100_ring_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                radeon_scratch_free(rdev, scratch);
                return r;
        }
-       radeon_ring_write(rdev, PACKET0(scratch, 0));
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET0(scratch, 0));
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF) {
@@ -3677,9 +3688,11 @@ int r100_ring_test(struct radeon_device *rdev)
 
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-       radeon_ring_write(rdev, ib->gpu_addr);
-       radeon_ring_write(rdev, ib->length_dw);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+       radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+       radeon_ring_write(ring, ib->gpu_addr);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
 int r100_ib_test(struct radeon_device *rdev)
@@ -3696,7 +3709,7 @@ int r100_ib_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ib_get(rdev, &ib);
+       r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
        if (r) {
                return r;
        }
@@ -3740,34 +3753,16 @@ int r100_ib_test(struct radeon_device *rdev)
 
 void r100_ib_fini(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        radeon_ib_pool_fini(rdev);
 }
 
-int r100_ib_init(struct radeon_device *rdev)
-{
-       int r;
-
-       r = radeon_ib_pool_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
-               r100_ib_fini(rdev);
-               return r;
-       }
-       r = r100_ib_test(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-               r100_ib_fini(rdev);
-               return r;
-       }
-       return 0;
-}
-
 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 {
        /* Shutdown CP we shouldn't need to do that but better be safe than
         * sorry
         */
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        WREG32(R_000740_CP_CSQ_CNTL, 0);
 
        /* Save few CRTC registers */
@@ -3905,6 +3900,12 @@ static int r100_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3914,11 +3915,18 @@ static int r100_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -3941,11 +3949,14 @@ int r100_resume(struct radeon_device *rdev)
        r100_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r100_startup(rdev);
 }
 
 int r100_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -4064,7 +4075,14 @@ int r100_init(struct radeon_device *rdev)
                        return r;
        }
        r100_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r100_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index a1f3ba063c2dda0de972f363345f815ab76f84d8..eba4cbfa78f6a10f38949d0810e2732bcb67241d 100644 (file)
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
                  unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
        /* radeon pitch is /64 */
        size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-       r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+       r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, (1 << 16));
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, (1 << 16));
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
-               radeon_ring_write(rdev, PACKET0(0x720, 2));
-               radeon_ring_write(rdev, src_offset);
-               radeon_ring_write(rdev, dst_offset);
-               radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+               radeon_ring_write(ring, PACKET0(0x720, 2));
+               radeon_ring_write(ring, src_offset);
+               radeon_ring_write(ring, dst_offset);
+               radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return r;
 }
 
index c93bc64707e1d6e3b59a5e2a7fe632b19e6efe99..3fc0d29a5f39e439e398a0f1e693a0e19b705c73 100644 (file)
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
 void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        /* Who ever call radeon_fence_emit should call ring_lock and ask
         * for enough space (today caller are ib schedule and buffer move) */
        /* Write SC register so SC & US assert idle */
-       radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+       radeon_ring_write(ring, 0);
        /* Flush 3D cache */
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
-       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
-       radeon_ring_write(rdev, fence->seq);
-       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
-       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+       radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
 void r300_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        unsigned gb_tile_config;
        int r;
 
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
                break;
        }
 
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
-       radeon_ring_write(rdev, gb_tile_config);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+       radeon_ring_write(ring, gb_tile_config);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
-       radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
-       radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+       radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+       radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+       radeon_ring_write(ring,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+       radeon_ring_write(ring,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
-       radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
-       radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+       radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+       radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+       radeon_ring_write(ring,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
-       radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+       radeon_ring_write(ring,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
                 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-bool r300_gpu_is_lockup(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 rbbm_status;
        int r;
 
        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-               r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+               r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+       ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
 }
 
 int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
 
-               if (p->keep_tiling_flags) {
+               if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
                        ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
                                  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
                } else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -1434,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev)
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r300_startup(rdev);
 }
 
 int r300_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -1539,7 +1558,14 @@ int r300_init(struct radeon_device *rdev)
                        return r;
        }
        r300_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r300_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index 417fab81812f32b40397de90c7dd2df306322227..666e28fe509c4fb44293af49555a29ab974f9098 100644 (file)
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
 
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
-       radeon_ring_lock(rdev, 8);
-       radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
-       radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_lock(rdev, ring, 8);
+       radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+       radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
-       radeon_ring_lock(rdev, 8);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_lock(rdev, ring, 8);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+       radeon_ring_unlock_commit(rdev, ring);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }
 
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev)
                return r;
        }
        r420_cp_errata_init(rdev);
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -297,11 +314,14 @@ int r420_resume(struct radeon_device *rdev)
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r420_startup(rdev);
 }
 
 int r420_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -414,7 +434,14 @@ int r420_init(struct radeon_device *rdev)
                        return r;
        }
        r420_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r420_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index fc437059918fb800001ccc871df9b7eb81c333bb..3bd8f1b1c606f4098fa959b33bf2b753bded93fe 100644 (file)
 
 #define AVIVO_TMDSA_CNTL                    0x7880
 #   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
 #   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
 #   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
 #   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
 
 #define AVIVO_LVTMA_CNTL                                       0x7a80
 #   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
 #   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
 #   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
 #   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
index 3081d07f8de51f839d9a7c3d8b70848de0000fa5..4ae1615e752ff8f6c5ecbfdc5eb83e3dbd120934 100644 (file)
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
        return 0;
@@ -223,6 +235,8 @@ int r520_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r520_startup(rdev);
 }
 
@@ -292,7 +306,14 @@ int r520_init(struct radeon_device *rdev)
        if (r)
                return r;
        rv515_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r520_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index 9cdda0b3b081e879f165f322e23780b338b583d9..4f08e5e6ee9d346c2b432cbf5349b0df07ff05fb 100644 (file)
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
        return 0;
 }
 
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status = RREG32(R_008010_GRBM_STATUS);
        grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
        if (!G_008010_GUI_ACTIVE(grbm_status)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 int r600_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
        uint32_t cp_me;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
        if (rdev->family >= CHIP_RV770) {
-               radeon_ring_write(rdev, 0x0);
-               radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+               radeon_ring_write(ring, 0x0);
+               radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
        } else {
-               radeon_ring_write(rdev, 0x3);
-               radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+               radeon_ring_write(ring, 0x3);
+               radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
        }
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 
 int r600_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
        WREG32(CP_RB_CNTL, tmp);
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
-       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       ring->rptr = RREG32(CP_RB_RPTR);
 
        r600_cp_start(rdev);
-       rdev->cp.ready = true;
-       r = radeon_ring_test(rdev);
+       ring->ready = true;
+       r = radeon_ring_test(rdev, ring);
        if (r) {
-               rdev->cp.ready = false;
+               ring->ready = false;
                return r;
        }
        return 0;
 }
 
-void r600_cp_commit(struct radeon_device *rdev)
-{
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
-       (void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
 {
        u32 rb_bufsz;
 
        /* Align ring size */
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
-       rdev->cp.ring_size = ring_size;
-       rdev->cp.align_mask = 16 - 1;
+       ring->ring_size = ring_size;
+       ring->align_mask = 16 - 1;
 }
 
 void r600_cp_fini(struct radeon_device *rdev)
 {
        r600_cp_stop(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
        }
 }
 
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
+       unsigned i, ridx = radeon_ring_index(rdev, ring);
        int r;
 
        r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ring_lock(rdev, 3);
+       r = radeon_ring_lock(rdev, ring, 3);
        if (r) {
-               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
                radeon_scratch_free(rdev, scratch);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
-               DRM_INFO("ring test succeeded in %d usecs\n", i);
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
        } else {
-               DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+               DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+                         ridx, scratch, tmp);
                r = -EINVAL;
        }
        radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,63 @@ int r600_ring_test(struct radeon_device *rdev)
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        if (rdev->wb.use_event) {
-               u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
-                       (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+               u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
                /* flush read cache over gart */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
                                        PACKET3_VC_ACTION_ENA |
                                        PACKET3_SH_ACTION_ENA);
-               radeon_ring_write(rdev, 0xFFFFFFFF);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 10); /* poll interval */
+               radeon_ring_write(ring, 0xFFFFFFFF);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 10); /* poll interval */
                /* EVENT_WRITE_EOP - flush caches, send int */
-               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
-               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-               radeon_ring_write(rdev, addr & 0xffffffff);
-               radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
-               radeon_ring_write(rdev, fence->seq);
-               radeon_ring_write(rdev, 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+               radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+               radeon_ring_write(ring, addr & 0xffffffff);
+               radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+               radeon_ring_write(ring, fence->seq);
+               radeon_ring_write(ring, 0);
        } else {
                /* flush read cache over gart */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
                                        PACKET3_VC_ACTION_ENA |
                                        PACKET3_SH_ACTION_ENA);
-               radeon_ring_write(rdev, 0xFFFFFFFF);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 10); /* poll interval */
-               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+               radeon_ring_write(ring, 0xFFFFFFFF);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 10); /* poll interval */
+               radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+               radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
                /* wait for 3D idle clean */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-               radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+               radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
                /* Emit fence sequence & fire IRQ */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-               radeon_ring_write(rdev, fence->seq);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+               radeon_ring_write(ring, fence->seq);
                /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
-               radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
-               radeon_ring_write(rdev, RB_INT_STAT);
+               radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+               radeon_ring_write(ring, RB_INT_STAT);
        }
 }
 
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *ring,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+       unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+       radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+       radeon_ring_write(ring, addr & 0xffffffff);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -2409,6 +2419,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 int r600_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -2447,6 +2458,12 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -2456,7 +2473,10 @@ int r600_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
+
        if (r)
                return r;
        r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
        return 0;
 }
 
@@ -2494,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        r = r600_audio_init(rdev);
        if (r) {
                DRM_ERROR("radeon: audio resume failed\n");
@@ -2518,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev)
 int r600_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -2595,8 +2622,8 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2632,24 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r600_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-                       rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
-               }
-       }
 
        r = r600_audio_init(rdev);
        if (r)
@@ -2643,12 +2664,13 @@ void r600_fini(struct radeon_device *rdev)
        r600_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
@@ -2662,18 +2684,20 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
        /* FIXME: implement */
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, ib->length_dw);
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
 {
        struct radeon_ib *ib;
        uint32_t scratch;
@@ -2687,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ib_get(rdev, &ib);
+       r = radeon_ib_get(rdev, ring, &ib, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
@@ -2728,7 +2752,7 @@ int r600_ib_test(struct radeon_device *rdev)
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
-               DRM_INFO("ib test succeeded in %u usecs\n", i);
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
@@ -3075,7 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
                hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
        }
 
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                DRM_DEBUG("r600_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
                cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3483,11 @@ restart_ih:
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3520,6 @@ restart_ih:
  */
 #if defined(CONFIG_DEBUG_FS)
 
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       unsigned count, i, j;
-
-       radeon_ring_free_size(rdev);
-       count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
-       seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-       seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
-       seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
-       seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
-       seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
-       seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
-       seq_printf(m, "%u dwords in ring\n", count);
-       i = rdev->cp.rptr;
-       for (j = 0; j <= count; j++) {
-               seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
-               i = (i + 1) & rdev->cp.ptr_mask;
-       }
-       return 0;
-}
-
 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3533,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 
 static struct drm_info_list r600_mc_info_list[] = {
        {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
-       {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
 };
 #endif
 
index 846fae5763998a7ed7ab0140f68deb7b3dae40b8..ba66f3093d46138be6010d299f7b103d49b4e5eb 100644 (file)
@@ -36,7 +36,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
  */
 static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
 {
+       u32 value = 0;
        DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
-       WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+       if (ASIC_IS_DCE4(rdev)) {
+               if (enable) {
+                       value |= 0x81000000; /* Required to enable audio */
+                       value |= 0x0e1000f0; /* fglrx sets that too */
+               }
+               WREG32(EVERGREEN_AUDIO_ENABLE, value);
+       } else {
+               WREG32_P(R600_AUDIO_ENABLE,
+                        enable ? 0x81000000 : 0x0, ~0x81000000);
+       }
        rdev->audio_enabled = enable;
 }
 
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
                return;
        }
 
-       switch (dig->dig_encoder) {
-       case 0:
-               WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
-               WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
-               WREG32(R600_AUDIO_CLK_SRCSEL, 0);
-               break;
-
-       case 1:
-               WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
-               WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
-               WREG32(R600_AUDIO_CLK_SRCSEL, 1);
-               break;
-       default:
-               dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
-                         radeon_encoder->encoder_id);
-               return;
+       if (ASIC_IS_DCE4(rdev)) {
+               /* TODO: other PLLs? */
+               WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+               WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+               WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+               /* Some magic trigger or src sel? */
+               WREG32_P(0x5ac, 0x01, ~0x77);
+       } else {
+               switch (dig->dig_encoder) {
+               case 0:
+                       WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+                       WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+                       WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+                       break;
+
+               case 1:
+                       WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+                       WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+                       WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+                       break;
+               default:
+                       dev_err(rdev->dev,
+                               "Unsupported DIG on encoder 0x%02X\n",
+                               radeon_encoder->encoder_id);
+                       return;
+               }
        }
 }
 
index e09d2818f949f55615c8e33723f14d73bdb69660..d996f43811302fedd6f6a48169d1858b61d11df2 100644 (file)
@@ -50,6 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cb_color_info;
        int pitch, slice;
 
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
        if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
-               radeon_ring_write(rdev, 2 << 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+               radeon_ring_write(ring, 2 << 0);
        }
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (pitch << 0) | (slice << 10));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, cb_color_info);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, cb_color_info);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cp_coher_size;
 
        if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
        else
                cp_coher_size = ((size + 255) >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(rdev, sync_type);
-       radeon_ring_write(rdev, cp_coher_size);
-       radeon_ring_write(rdev, mc_addr >> 8);
-       radeon_ring_write(rdev, 10); /* poll interval */
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, sync_type);
+       radeon_ring_write(ring, cp_coher_size);
+       radeon_ring_write(ring, mc_addr >> 8);
+       radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 21dw + 1 surface sync = 26dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u64 gpu_addr;
        u32 sq_pgm_resources;
 
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
 
        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_pgm_resources);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_pgm_resources);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 2);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_vtx_constant_word2;
 
        sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
        sq_vtx_constant_word2 |=  SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
 #endif
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-       radeon_ring_write(rdev, 0x460);
-       radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-       radeon_ring_write(rdev, 48 - 1);
-       radeon_ring_write(rdev, sq_vtx_constant_word2);
-       radeon_ring_write(rdev, 1 << 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+       radeon_ring_write(ring, 0x460);
+       radeon_ring_write(ring, gpu_addr & 0xffffffff);
+       radeon_ring_write(ring, 48 - 1);
+       radeon_ring_write(ring, sq_vtx_constant_word2);
+       radeon_ring_write(ring, 1 << 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
 
        if ((rdev->family == CHIP_RV610) ||
            (rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr, u32 size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
        if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
        cp_set_surface_sync(rdev,
                            PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word0);
-       radeon_ring_write(rdev, sq_tex_resource_word1);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, sq_tex_resource_word4);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word0);
+       radeon_ring_write(ring, sq_tex_resource_word1);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, sq_tex_resource_word4);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
 }
 
 /* emits 12 */
@@ -241,43 +246,45 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, DI_PT_RECTLIST);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, DI_PT_RECTLIST);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 2) |
 #endif
                          DI_INDEX_SIZE_16_BIT);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+       radeon_ring_write(ring, 1);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-       radeon_ring_write(rdev, 3);
-       radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+       radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+       radeon_ring_write(ring, 3);
+       radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
        u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
        int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, dwords);
+       radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+       radeon_ring_write(ring, dwords);
 
        /* SQ config */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
-       radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_config);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-       radeon_ring_write(rdev, sq_thread_resource_mgmt);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+       radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_config);
+       radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+       radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+       radeon_ring_write(ring, sq_thread_resource_mgmt);
+       radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+       radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
 static uint32_t i2f(uint32_t input)
@@ -611,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
 {
        int r;
-       r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+       r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+                         &rdev->r600_blit.vb_ib, size);
        if (r) {
                DRM_ERROR("failed to get IB for vertex buffer\n");
                return r;
        }
 
-       rdev->r600_blit.vb_total = 64*1024;
+       rdev->r600_blit.vb_total = size;
        rdev->r600_blit.vb_used = 0;
        return 0;
 }
@@ -679,15 +688,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
        int ring_size;
        int num_loops = 0;
        int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
 
-       r = r600_vb_ib_get(rdev);
-       if (r)
-               return r;
-
        /* num loops */
        while (num_gpu_pages) {
                num_gpu_pages -=
@@ -696,10 +702,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
                num_loops++;
        }
 
+       /* 48 bytes for vertex per loop */
+       r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+       if (r)
+               return r;
+
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        ring_size += rdev->r600_blit.ring_size_common;
-       r = radeon_ring_lock(rdev, ring_size);
+       r = radeon_ring_lock(rdev, ring, ring_size);
        if (r)
                return r;
 
@@ -718,7 +729,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
        if (fence)
                r = radeon_fence_emit(rdev, fence);
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
index c9db4931913f3fd2c1539f56fa50fe1bbe0833e9..84c546250955bfc354456dc858f839c6bb27fcfa 100644 (file)
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
                     dev_priv->ring.size_l2qw);
 #endif
 
-       RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+       RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
index cb1acffd24303aca4c49b11e62b74719a527d51b..38ce5d0427e34a4bf8388da339864c01ad1bf470 100644 (file)
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                track->db_depth_control = radeon_get_ib_value(p, idx);
                break;
        case R_028010_DB_DEPTH_INFO:
-               if (!p->keep_tiling_flags &&
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                    r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case R_0280B4_CB_COLOR5_INFO:
        case R_0280B8_CB_COLOR6_INFO:
        case R_0280BC_CB_COLOR7_INFO:
-               if (!p->keep_tiling_flags &&
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                     r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
        mip_offset <<= 8;
 
        word0 = radeon_get_ib_value(p, idx + 0);
-       if (!p->keep_tiling_flags) {
+       if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                if (tiling_flags & RADEON_TILING_MACRO)
                        word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1625,7 +1625,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (!p->keep_tiling_flags) {
+                               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                                                ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                                        else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
index f5ac7e788d813b9b744c08cdbc0cee72c0d0ec09..0b59206714505e24aa329b8adc2e0fd0e7dbc8ad 100644 (file)
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
        frame[0xD] = (right_bar >> 8);
 
        r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+       /* Our header values (type, version, length) should be alright, Intel
+        * is using the same. Checksum function also seems to be OK, it works
+        * fine for audio infoframe. However calculated value is always lower
+        * by 2 in comparison to fglrx. It breaks displaying anything in case
+        * of TVs that strictly check the checksum. Hack it manually here to
+        * work around this issue. */
+       frame[0x0] += 2;
 
        WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        struct radeon_device *rdev = dev->dev_private;
        uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
+       u16 eg_offsets[] = {
+               EVERGREEN_CRTC0_REGISTER_OFFSET,
+               EVERGREEN_CRTC1_REGISTER_OFFSET,
+               EVERGREEN_CRTC2_REGISTER_OFFSET,
+               EVERGREEN_CRTC3_REGISTER_OFFSET,
+               EVERGREEN_CRTC4_REGISTER_OFFSET,
+               EVERGREEN_CRTC5_REGISTER_OFFSET,
+       };
+
        if (!dig) {
                dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
                return;
        }
 
-       if (ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
                /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
+                       dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
+                       return;
+               }
+               radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
+                                               eg_offsets[dig->dig_encoder];
+               radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
+                                               + EVERGREEN_HDMI_CONFIG_OFFSET;
        } else if (ASIC_IS_DCE3(rdev)) {
                radeon_encoder->hdmi_offset = dig->dig_encoder ?
                        R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        uint32_t offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        }
 
        offset = radeon_encoder->hdmi_offset;
-       if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
+               /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
+       } else if (ASIC_IS_DCE32(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
-       } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+       } else if (ASIC_IS_DCE3(rdev)) {
+               /* TODO */
+       } else if (rdev->family >= CHIP_R600) {
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+                       WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0x101);
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+                       WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0x105);
                        break;
                default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        if (rdev->irq.installed
            && rdev->family != CHIP_RS600
            && rdev->family != CHIP_RS690
-           && rdev->family != CHIP_RS740) {
-
+           && rdev->family != CHIP_RS740
+           && !ASIC_IS_DCE4(rdev)) {
                /* if irq is available use it */
                rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
                radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        uint32_t offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        /* disable polling */
        r600_audio_disable_polling(encoder);
 
-       if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
+               /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
+       } else if (ASIC_IS_DCE32(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
        } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+                       WREG32_P(AVIVO_TMDSA_CNTL, 0,
+                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0);
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+                       WREG32_P(AVIVO_LVTMA_CNTL, 0,
+                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0);
                        break;
                default:
index bfe1b5d92afe86e093b8c61013a49ae053209be1..3ee1fd7ef3947d1f624ae8a059d9c511ea15eead 100644 (file)
 #define        PACKET3_STRMOUT_BUFFER_UPDATE                   0x34
 #define        PACKET3_INDIRECT_BUFFER_MP                      0x38
 #define        PACKET3_MEM_SEMAPHORE                           0x39
+#              define PACKET3_SEM_SEL_SIGNAL       (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT         (0x7 << 29)
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
index 8227e76b5c70a1d1b97bcac741f1559272a2250e..73e05cb85eca03cd2c65461ce1af7ca3f82e0bfc 100644 (file)
@@ -107,6 +107,21 @@ extern int radeon_msi;
 #define RADEONFB_CONN_LIMIT            4
 #define RADEON_BIOS_NUM_SCRATCH                8
 
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX  0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode those limit for now */
+#define RADEON_VA_RESERVED_SIZE                (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE          (64 << 10)
+
 /*
  * Errata workarounds.
  */
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
  */
 struct radeon_fence_driver {
        uint32_t                        scratch_reg;
+       uint64_t                        gpu_addr;
+       volatile uint32_t               *cpu_addr;
        atomic_t                        seq;
        uint32_t                        last_seq;
        unsigned long                   last_jiffies;
        unsigned long                   last_timeout;
        wait_queue_head_t               queue;
-       rwlock_t                        lock;
        struct list_head                created;
-       struct list_head                emited;
+       struct list_head                emitted;
        struct list_head                signaled;
        bool                            initialized;
 };
@@ -210,21 +226,26 @@ struct radeon_fence {
        struct list_head                list;
        /* protected by radeon_fence.lock */
        uint32_t                        seq;
-       bool                            emited;
+       bool                            emitted;
        bool                            signaled;
+       /* RB, DMA, etc. */
+       int                             ring;
+       struct radeon_semaphore         *semaphore;
 };
 
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
 
 /*
  * Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
        bool                            initialized;
 };
 
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+       /* bo list is protected by bo being reserved */
+       struct list_head                bo_list;
+       /* vm list is protected by vm mutex */
+       struct list_head                vm_list;
+       /* constant after initialization */
+       struct radeon_vm                *vm;
+       struct radeon_bo                *bo;
+       uint64_t                        soffset;
+       uint64_t                        eoffset;
+       uint32_t                        flags;
+       bool                            valid;
+};
+
 struct radeon_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
@@ -259,6 +295,10 @@ struct radeon_bo {
        u32                             tiling_flags;
        u32                             pitch;
        int                             surface_reg;
+       /* list of all virtual address to which this bo
+        * is associated to
+        */
+       struct list_head                va;
        /* Constant after initialization */
        struct radeon_device            *rdev;
        struct drm_gem_object           gem_base;
@@ -274,6 +314,48 @@ struct radeon_bo_list {
        u32                     tiling_flags;
 };
 
+/* sub-allocation manager, it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver
+ * like the indirect buffer or semaphore, which both have their
+ * locking.
+ *
+ * Principle is simple: we keep a list of sub-allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating new object we first check if there is room at
+ * the end total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset+object_size >=
+ * alloc_size, this object then become the sub object we return.
+ *
+ * Alignment can't be bigger than page size.
+ *
+ * Holes are not considered for allocation to keep things simple.
+ * Assumption is that there won't be holes (all objects on the same
+ * alignment).
+ */
+struct radeon_sa_manager {
+       struct radeon_bo        *bo;
+       struct list_head        sa_bo;
+       unsigned                size;
+       uint64_t                gpu_addr;
+       void                    *cpu_ptr;
+       uint32_t                domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+       struct list_head                list;
+       struct radeon_sa_manager        *manager;
+       unsigned                        offset;
+       unsigned                        size;
+};
+
 /*
  * GEM objects.
  */
@@ -302,6 +384,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle);
 
+/*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define        RADEON_SEMAPHORE_BO_SIZE        256
+
+struct radeon_semaphore_driver {
+       rwlock_t                        lock;
+       struct list_head                bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+       struct list_head                list;
+       uint64_t                        gpu_addr;
+       uint32_t                        *cpu_ptr;
+       struct radeon_semaphore_bo      *bo;
+};
+
+struct radeon_semaphore_bo {
+       struct list_head                list;
+       struct radeon_ib                *ib;
+       struct list_head                free;
+       struct radeon_semaphore         semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+       unsigned                        nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+                           struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+                                 struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+                               struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+                          struct radeon_semaphore *semaphore);
+
 /*
  * GART structures, functions & helpers
  */
@@ -310,6 +432,7 @@ struct radeon_mc;
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
 #define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
        unsigned                        table_size;
        struct page                     **pages;
        dma_addr_t                      *pages_addr;
-       bool                            *ttm_alloced;
        bool                            ready;
 };
 
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
 
 struct radeon_irq {
        bool            installed;
-       bool            sw_int;
+       bool            sw_int[RADEON_NUM_RINGS];
        bool            crtc_vblank_int[RADEON_MAX_CRTCS];
        bool            pflip[RADEON_MAX_CRTCS];
        wait_queue_head_t       vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
        wait_queue_head_t       idle_queue;
        bool            hdmi[RADEON_MAX_HDMI_BLOCKS];
        spinlock_t sw_lock;
-       int sw_refcount;
+       int sw_refcount[RADEON_NUM_RINGS];
        union radeon_irq_stat_regs stat_regs;
        spinlock_t pflip_lock[RADEON_MAX_CRTCS];
        int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +574,23 @@ struct radeon_irq {
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
- * CP & ring.
+ * CP & rings.
  */
+
 struct radeon_ib {
-       struct list_head        list;
+       struct radeon_sa_bo     sa_bo;
        unsigned                idx;
+       uint32_t                length_dw;
        uint64_t                gpu_addr;
-       struct radeon_fence     *fence;
        uint32_t                *ptr;
-       uint32_t                length_dw;
-       bool                    free;
+       struct radeon_fence     *fence;
+       unsigned                vm_id;
 };
 
 /*
@@ -475,20 +598,22 @@ struct radeon_ib {
  * mutex protects scheduled_ibs, ready, alloc_bm
  */
 struct radeon_ib_pool {
-       struct mutex            mutex;
-       struct radeon_bo        *robj;
-       struct list_head        bogus_ib;
-       struct radeon_ib        ibs[RADEON_IB_POOL_SIZE];
-       bool                    ready;
-       unsigned                head_id;
+       struct mutex                    mutex;
+       struct radeon_sa_manager        sa_manager;
+       struct radeon_ib                ibs[RADEON_IB_POOL_SIZE];
+       bool                            ready;
+       unsigned                        head_id;
 };
 
-struct radeon_cp {
+struct radeon_ring {
        struct radeon_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr;
+       unsigned                rptr_offs;
+       unsigned                rptr_reg;
        unsigned                wptr;
        unsigned                wptr_old;
+       unsigned                wptr_reg;
        unsigned                ring_size;
        unsigned                ring_free_dw;
        int                     count_dw;
@@ -497,6 +622,61 @@ struct radeon_cp {
        uint32_t                ptr_mask;
        struct mutex            mutex;
        bool                    ready;
+       u32                     ptr_reg_shift;
+       u32                     ptr_reg_mask;
+       u32                     nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+       struct list_head                list;
+       struct list_head                va;
+       int                             id;
+       unsigned                        last_pfn;
+       u64                             pt_gpu_addr;
+       u64                             *pt;
+       struct radeon_sa_bo             sa_bo;
+       struct mutex                    mutex;
+       /* last fence for cs using this vm */
+       struct radeon_fence             *fence;
+};
+
+struct radeon_vm_funcs {
+       int (*init)(struct radeon_device *rdev);
+       void (*fini)(struct radeon_device *rdev);
+       /* cs mutex must be lock for schedule_ib */
+       int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+       void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+       void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+       uint32_t (*page_flags)(struct radeon_device *rdev,
+                              struct radeon_vm *vm,
+                              uint32_t flags);
+       void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+       struct list_head                lru_vm;
+       uint32_t                        use_bitmap;
+       struct radeon_sa_manager        sa_manager;
+       uint32_t                        max_pfn;
+       /* fields constant after init */
+       const struct radeon_vm_funcs    *funcs;
+       /* number of VMIDs */
+       unsigned                        nvm;
+       /* vram base address for page table entry  */
+       u64                             vram_base_offset;
+       /* is vm enabled? */
+       bool                            enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+       struct radeon_vm                vm;
 };
 
 /*
@@ -506,6 +686,7 @@ struct r600_ih {
        struct radeon_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr;
+       unsigned                rptr_offs;
        unsigned                wptr;
        unsigned                wptr_old;
        unsigned                ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
 
 void r600_blit_suspend(struct radeon_device *rdev);
 
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+                 struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+                    u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
 /*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
        uint32_t                chunk_id;
        uint32_t                length_dw;
-       int kpage_idx[2];
-       uint32_t                *kpage[2];
+       int                     kpage_idx[2];
+       uint32_t                *kpage[2];
        uint32_t                *kdata;
-       void __user *user_ptr;
-       int last_copied_page;
-       int last_page_index;
+       void __user             *user_ptr;
+       int                     last_copied_page;
+       int                     last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
        struct radeon_cs_reloc  *relocs;
        struct radeon_cs_reloc  **relocs_ptr;
        struct list_head        validated;
+       bool                    sync_to_ring[RADEON_NUM_RINGS];
        /* indices of various chunks */
        int                     chunk_ib_idx;
        int                     chunk_relocs_idx;
+       int                     chunk_flags_idx;
        struct radeon_ib        *ib;
        void                    *track;
        unsigned                family;
        int                     parser_error;
-       bool                    keep_tiling_flags;
+       u32                     cs_flags;
+       u32                     ring;
+       s32                     priority;
 };
 
 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
  * Testing
  */
 void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+                          struct radeon_ring *cpA,
+                          struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
 
 
 /*
  * Debugfs
  */
+struct radeon_debugfs {
+       struct drm_info_list    *files;
+       unsigned                num_files;
+};
+
 int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
        int (*resume)(struct radeon_device *rdev);
        int (*suspend)(struct radeon_device *rdev);
        void (*vga_set_state)(struct radeon_device *rdev, bool state);
-       bool (*gpu_is_lockup)(struct radeon_device *rdev);
+       bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
        int (*asic_reset)(struct radeon_device *rdev);
        void (*gart_tlb_flush)(struct radeon_device *rdev);
        int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
        int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
        void (*cp_fini)(struct radeon_device *rdev);
        void (*cp_disable)(struct radeon_device *rdev);
-       void (*cp_commit)(struct radeon_device *rdev);
        void (*ring_start)(struct radeon_device *rdev);
-       int (*ring_test)(struct radeon_device *rdev);
-       void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+       struct {
+               void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+               int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+               void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+               void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+                                      struct radeon_semaphore *semaphore, bool emit_wait);
+       } ring[RADEON_NUM_RINGS];
+
+       int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
        int (*irq_set)(struct radeon_device *rdev);
        int (*irq_process)(struct radeon_device *rdev);
        u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
-       void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
        int (*cs_parse)(struct radeon_cs_parser *p);
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp);
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
        struct radeon_mode_info         mode_info;
        struct radeon_scratch           scratch;
        struct radeon_mman              mman;
-       struct radeon_fence_driver      fence_drv;
-       struct radeon_cp                cp;
-       /* cayman compute rings */
-       struct radeon_cp                cp1;
-       struct radeon_cp                cp2;
+       rwlock_t                        fence_lock;
+       struct radeon_fence_driver      fence_drv[RADEON_NUM_RINGS];
+       struct radeon_semaphore_driver  semaphore_drv;
+       struct radeon_ring              ring[RADEON_NUM_RINGS];
        struct radeon_ib_pool           ib_pool;
        struct radeon_irq               irq;
        struct radeon_asic              *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
        struct drm_file *cmask_filp;
        /* i2c buses */
        struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+       /* debugfs */
+       struct radeon_debugfs   debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+       unsigned                debugfs_count;
+       /* virtual memory */
+       struct radeon_vm_manager        vm_manager;
+       /* ring used for bo copies */
+       u32                             copy_ring;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
 /*
  * RING helpers.
  */
-
 #if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
-       rdev->cp.ring[rdev->cp.wptr++] = v;
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
-       rdev->cp.count_dw--;
-       rdev->cp.ring_free_dw--;
+       ring->ring[ring->wptr++] = v;
+       ring->wptr &= ring->ptr_mask;
+       ring->count_dw--;
+       ring->ring_free_dw--;
 }
 #else
 /* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #endif
 
 /*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1502,6 +1716,33 @@ extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
+/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+                           struct radeon_vm *vm,
+                           struct radeon_bo *bo,
+                           struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+                            struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo,
+                    uint64_t offset,
+                    uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo);
+
+
 /*
  * R600 vram scratch functions
  */
index a2e1eae114ef9ec1048e8b455bd5dee29fe28277..36a6192ce862b5955356aed3466724cb0a696153 100644 (file)
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
        .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r100_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r100_fence_ring_emit,
        .cs_parse = &r100_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
        .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r100_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r100_fence_ring_emit,
        .cs_parse = &r100_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs600_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
-       .cp_commit = &r600_cp_commit,
        .vga_set_state = &r600_vga_set_state,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
        .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
        .fini = &rv770_fini,
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
-       .cp_commit = &r600_cp_commit,
        .asic_reset = &r600_asic_reset,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
        .post_page_flip = &evergreen_post_page_flip,
 };
 
+static const struct radeon_vm_funcs cayman_vm_funcs = {
+       .init = &cayman_vm_init,
+       .fini = &cayman_vm_fini,
+       .bind = &cayman_vm_bind,
+       .unbind = &cayman_vm_unbind,
+       .tlb_flush = &cayman_vm_tlb_flush,
+       .page_flags = &cayman_vm_page_flags,
+       .set_page = &cayman_vm_set_page,
+};
+
 static struct radeon_asic cayman_asic = {
        .init = &cayman_init,
        .fini = &cayman_fini,
        .suspend = &cayman_suspend,
        .resume = &cayman_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &cayman_gpu_is_lockup,
        .asic_reset = &cayman_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               },
+               [CAYMAN_RING_TYPE_CP1_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               },
+               [CAYMAN_RING_TYPE_CP2_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
        else
                rdev->num_crtc = 2;
 
+       /* set the ring used for bo copies */
+       rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                rdev->asic = &cayman_asic;
                /* set num crtcs */
                rdev->num_crtc = 6;
+               rdev->vm_manager.funcs = &cayman_vm_funcs;
                break;
        default:
                /* FIXME: not supported yet */
index 59914842a7292c63b5bad6032d3b1a496ed06e7d..6304aef0d9b27339ad44a020ac8a86ca282a631b 100644 (file)
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
 int r100_suspend(struct radeon_device *rdev);
 int r100_resume(struct radeon_device *rdev);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_cp_commit(struct radeon_device *rdev);
 void r100_ring_start(struct radeon_device *rdev);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *cp,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait);
 int r100_cs_parse(struct radeon_cs_parser *p);
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 void r100_hpd_init(struct radeon_device *rdev);
 void r100_hpd_fini(struct radeon_device *rdev);
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
 void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
-                           struct radeon_cp *cp);
+                           struct radeon_ring *cp);
 bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
                           struct r100_gpu_lockup *lockup,
-                          struct radeon_cp *cp);
+                          struct radeon_ring *cp);
 void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_init(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev);
 void r100_irq_disable(struct radeon_device *rdev);
 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
 void r600_vga_set_state(struct radeon_device *rdev, bool state);
 int r600_wb_init(struct radeon_device *rdev);
 void r600_wb_fini(struct radeon_device *rdev);
-void r600_cp_commit(struct radeon_device *rdev);
 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
-bool r600_gpu_is_lockup(struct radeon_device *rdev);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *cp,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev);
+int r600_ib_test(struct radeon_device *rdev, int ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
 bool r600_card_posted(struct radeon_device *rdev);
 void r600_cp_stop(struct radeon_device *rdev);
 int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
 int r600_cp_resume(struct radeon_device *rdev);
 void r600_cp_fini(struct radeon_device *rdev);
 int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
 /*
  * cayman
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+                           struct radeon_fence *fence);
 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
 int cayman_suspend(struct radeon_device *rdev);
 int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+                             struct radeon_vm *vm,
+                             uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 
 #endif
index 17e1a9b2d8fbf3e1dedce2923a7e5b726aea284c..815f2341ab9491013257987081c4214917f9d0af 100644 (file)
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 
        start_jiffies = jiffies;
        for (i = 0; i < n; i++) {
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r)
                        return r;
 
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
                break;
        case 6:
                /* GTT to VRAM, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_GTT,
                                              RADEON_GEM_DOMAIN_VRAM);
                break;
        case 7:
                /* VRAM to GTT, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_VRAM,
                                              RADEON_GEM_DOMAIN_GTT);
                break;
        case 8:
                /* VRAM to VRAM, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_VRAM,
                                              RADEON_GEM_DOMAIN_VRAM);
index 29afd71e0840a0b9995b5e30027577adeec45b92..435a3d970ab8b0ef39c45f45ea8c22bdec05abf4 100644 (file)
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-               for (j = 0; j < p->nrelocs; j++) {
+               for (j = 0; j < i; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                        p->relocs[i].flags = r->flags;
                        radeon_bo_list_add_object(&p->relocs[i].lobj,
                                                  &p->validated);
-               }
+
+                       if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
+                               struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+                               if (!radeon_fence_signaled(fence)) {
+                                       p->sync_to_ring[fence->ring] = true;
+                               }
+                       }
+               } else
+                       p->relocs[i].handle = 0;
        }
        return radeon_bo_list_validate(&p->validated);
 }
 
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+       p->priority = priority;
+
+       switch (ring) {
+       default:
+               DRM_ERROR("unknown ring id: %d\n", ring);
+               return -EINVAL;
+       case RADEON_CS_RING_GFX:
+               p->ring = RADEON_RING_TYPE_GFX_INDEX;
+               break;
+       case RADEON_CS_RING_COMPUTE:
+               /* for now */
+               p->ring = RADEON_RING_TYPE_GFX_INDEX;
+               break;
+       }
+       return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+       int i, r;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               /* no need to sync to our own or unused rings */
+               if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+                       continue;
+
+               if (!p->ib->fence->semaphore) {
+                       r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+                       if (r)
+                               return r;
+               }
+
+               r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
+               if (r)
+                       return r;
+               radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
+               radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+
+               r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
+               if (r)
+                       return r;
+               radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
+               radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+       }
+       return 0;
+}
+
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
-       unsigned size, i, flags = 0;
+       unsigned size, i;
+       u32 ring = RADEON_CS_RING_GFX;
+       s32 priority = 0;
 
        if (!cs->num_chunks) {
                return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        p->idx = 0;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
+       p->chunk_flags_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                               sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
+       p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
-                   !p->chunks[i].length_dw) {
-                       return -EINVAL;
+               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                       p->chunk_flags_idx = i;
+                       /* zero length flags aren't useful */
+                       if (p->chunks[i].length_dw == 0)
+                               return -EINVAL;
                }
 
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 
                cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-               if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+               if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+                   (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
                        size = p->chunks[i].length_dw * sizeof(uint32_t);
                        p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
                        if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                return -EFAULT;
                        }
                        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-                               flags = p->chunks[i].kdata[0];
+                               p->cs_flags = p->chunks[i].kdata[0];
+                               if (p->chunks[i].length_dw > 1)
+                                       ring = p->chunks[i].kdata[1];
+                               if (p->chunks[i].length_dw > 2)
+                                       priority = (s32)p->chunks[i].kdata[2];
                        }
-               } else {
-                       p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                       p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
-                               kfree(p->chunks[i].kpage[0]);
-                               kfree(p->chunks[i].kpage[1]);
-                               return -ENOMEM;
-                       }
-                       p->chunks[i].kpage_idx[0] = -1;
-                       p->chunks[i].kpage_idx[1] = -1;
-                       p->chunks[i].last_copied_page = -1;
-                       p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
                }
        }
-       if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
-               DRM_ERROR("cs IB too big: %d\n",
-                         p->chunks[p->chunk_ib_idx].length_dw);
+
+       if ((p->cs_flags & RADEON_CS_USE_VM) &&
+           !p->rdev->vm_manager.enabled) {
+               DRM_ERROR("VM not active on asic!\n");
+               if (p->chunk_relocs_idx != -1)
+                       kfree(p->chunks[p->chunk_relocs_idx].kdata);
+               if (p->chunk_flags_idx != -1)
+                       kfree(p->chunks[p->chunk_flags_idx].kdata);
                return -EINVAL;
        }
 
-       p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+       if (radeon_cs_get_ring(p, ring, priority)) {
+               if (p->chunk_relocs_idx != -1)
+                       kfree(p->chunks[p->chunk_relocs_idx].kdata);
+               if (p->chunk_flags_idx != -1)
+                       kfree(p->chunks[p->chunk_flags_idx].kdata);
+               return -EINVAL;
+       }
+
+
+       /* deal with non-vm */
+       if ((p->chunk_ib_idx != -1) &&
+           ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+           (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+               if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+                       DRM_ERROR("cs IB too big: %d\n",
+                                 p->chunks[p->chunk_ib_idx].length_dw);
+                       return -EINVAL;
+               }
+               p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+                   p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+                       kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+                       kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+                       return -ENOMEM;
+               }
+               p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+               p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+               p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+               p->chunks[p->chunk_ib_idx].last_page_index =
+                       ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+       }
+
        return 0;
 }
 
@@ -224,11 +317,139 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        radeon_ib_free(parser->rdev, &parser->ib);
 }
 
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+                             struct radeon_cs_parser *parser)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       int r;
+
+       if (parser->chunk_ib_idx == -1)
+               return 0;
+
+       if (parser->cs_flags & RADEON_CS_USE_VM)
+               return 0;
+
+       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       /* Copy the packet into the IB, the parser will read from the
+        * input memory (cached) and write to the IB (which can be
+        * uncached).
+        */
+       r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+                          ib_chunk->length_dw * 4);
+       if (r) {
+               DRM_ERROR("Failed to get ib !\n");
+               return r;
+       }
+       parser->ib->length_dw = ib_chunk->length_dw;
+       r = radeon_cs_parse(parser);
+       if (r || parser->parser_error) {
+               DRM_ERROR("Invalid command stream !\n");
+               return r;
+       }
+       r = radeon_cs_finish_pages(parser);
+       if (r) {
+               DRM_ERROR("Invalid command stream !\n");
+               return r;
+       }
+       r = radeon_cs_sync_rings(parser);
+       if (r) {
+               DRM_ERROR("Failed to synchronize rings !\n");
+       }
+       parser->ib->vm_id = 0;
+       r = radeon_ib_schedule(rdev, parser->ib);
+       if (r) {
+               DRM_ERROR("Failed to schedule IB !\n");
+       }
+       return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+                                  struct radeon_vm *vm)
+{
+       struct radeon_bo_list *lobj;
+       struct radeon_bo *bo;
+       int r;
+
+       list_for_each_entry(lobj, &parser->validated, tv.head) {
+               bo = lobj->bo;
+               r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+               if (r) {
+                       return r;
+               }
+       }
+       return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+                                struct radeon_cs_parser *parser)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+       struct radeon_vm *vm = &fpriv->vm;
+       int r;
+
+       if (parser->chunk_ib_idx == -1)
+               return 0;
+
+       if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+               return 0;
+
+       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+               DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+               return -EINVAL;
+       }
+       r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+                          ib_chunk->length_dw * 4);
+       if (r) {
+               DRM_ERROR("Failed to get ib !\n");
+               return r;
+       }
+       parser->ib->length_dw = ib_chunk->length_dw;
+       /* Copy the packet into the IB */
+       if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+                              ib_chunk->length_dw * 4)) {
+               return -EFAULT;
+       }
+       r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+       if (r) {
+               return r;
+       }
+
+       mutex_lock(&vm->mutex);
+       r = radeon_vm_bind(rdev, vm);
+       if (r) {
+               goto out;
+       }
+       r = radeon_bo_vm_update_pte(parser, vm);
+       if (r) {
+               goto out;
+       }
+       r = radeon_cs_sync_rings(parser);
+       if (r) {
+               DRM_ERROR("Failed to synchronize rings !\n");
+       }
+       parser->ib->vm_id = vm->id;
+       /* ib pool is bind at 0 in virtual address space to gpu_addr is the
+        * offset inside the pool bo
+        */
+       parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+       r = radeon_ib_schedule(rdev, parser->ib);
+out:
+       if (!r) {
+               if (vm->fence) {
+                       radeon_fence_unref(&vm->fence);
+               }
+               vm->fence = radeon_fence_ref(parser->ib->fence);
+       }
+       mutex_unlock(&fpriv->vm.mutex);
+       return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
-       struct radeon_cs_chunk *ib_chunk;
        int r;
 
        radeon_mutex_lock(&rdev->cs_mutex);
@@ -245,13 +466,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
-       r =  radeon_ib_get(rdev, &parser.ib);
-       if (r) {
-               DRM_ERROR("Failed to get ib !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
-       }
        r = radeon_cs_parser_relocs(&parser);
        if (r) {
                if (r != -ERESTARTSYS)
@@ -260,29 +474,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
-       /* Copy the packet into the IB, the parser will read from the
-        * input memory (cached) and write to the IB (which can be
-        * uncached). */
-       ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-       parser.ib->length_dw = ib_chunk->length_dw;
-       r = radeon_cs_parse(&parser);
-       if (r || parser.parser_error) {
-               DRM_ERROR("Invalid command stream !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
-       }
-       r = radeon_cs_finish_pages(&parser);
+       r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
-               DRM_ERROR("Invalid command stream !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
+               goto out;
        }
-       r = radeon_ib_schedule(rdev, parser.ib);
+       r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r) {
-               DRM_ERROR("Failed to schedule IB !\n");
+               goto out;
        }
+out:
        radeon_cs_parser_fini(&parser, r);
        radeon_mutex_unlock(&rdev->cs_mutex);
        return r;
index c4d00a171411890fe701ea853ca084c39e4ac3b7..0afb13bd8dcad47ef5572991fec27e126185416d 100644 (file)
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
        if (radeon_no_wb == 1)
                rdev->wb.enabled = false;
        else {
-               /* often unreliable on AGP */
                if (rdev->flags & RADEON_IS_AGP) {
+                       /* often unreliable on AGP */
+                       rdev->wb.enabled = false;
+               } else if (rdev->family < CHIP_R300) {
+                       /* often unreliable on pre-r300 */
                        rdev->wb.enabled = false;
                } else {
                        rdev->wb.enabled = true;
@@ -718,17 +721,24 @@ int radeon_device_init(struct radeon_device *rdev,
         * can recall function without having locking issues */
        radeon_mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
-       mutex_init(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               mutex_init(&rdev->ring[i].mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
        if (rdev->family >= CHIP_R600)
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->vram_mutex);
-       rwlock_init(&rdev->fence_drv.lock);
+       rwlock_init(&rdev->fence_lock);
+       rwlock_init(&rdev->semaphore_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
        init_waitqueue_head(&rdev->irq.idle_queue);
+       INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+       /* initialize vm here */
+       rdev->vm_manager.use_bitmap = 1;
+       rdev->vm_manager.max_pfn = 1 << 20;
+       INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                rdev->need_dma32 = true;
+               dma_bits = 32;
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }
+       r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+               printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+       }
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
                if (r)
                        return r;
        }
-       if (radeon_testing) {
+       if ((radeon_testing & 1)) {
                radeon_test_moves(rdev);
        }
+       if ((radeon_testing & 2)) {
+               radeon_test_syncing(rdev);
+       }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev, radeon_benchmarking);
        }
        return 0;
 }
 
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
 void radeon_device_fini(struct radeon_device *rdev)
 {
        DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
        rdev->rio_mem = NULL;
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
+       radeon_debugfs_remove_files(rdev);
 }
 
 
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        struct radeon_device *rdev;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
-       int r;
+       int i, r;
 
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
@@ -887,7 +909,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
-       radeon_fence_wait_last(rdev);
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               radeon_fence_wait_last(rdev, i);
 
        radeon_save_bios_scratch_regs(rdev);
 
@@ -986,36 +1009,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 /*
  * Debugfs
  */
-struct radeon_debugfs {
-       struct drm_info_list    *files;
-       unsigned                num_files;
-};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-static unsigned _radeon_debugfs_count = 0;
-
 int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
 {
        unsigned i;
 
-       for (i = 0; i < _radeon_debugfs_count; i++) {
-               if (_radeon_debugfs[i].files == files) {
+       for (i = 0; i < rdev->debugfs_count; i++) {
+               if (rdev->debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
 
-       i = _radeon_debugfs_count + 1;
+       i = rdev->debugfs_count + 1;
        if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
                DRM_ERROR("Reached maximum number of debugfs components.\n");
                DRM_ERROR("Report so we increase "
                          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
                return -EINVAL;
        }
-       _radeon_debugfs[_radeon_debugfs_count].files = files;
-       _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
-       _radeon_debugfs_count = i;
+       rdev->debugfs[rdev->debugfs_count].files = files;
+       rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+       rdev->debugfs_count = i;
 #if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
@@ -1027,6 +1043,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
        return 0;
 }
 
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned i;
+
+       for (i = 0; i < rdev->debugfs_count; i++) {
+               drm_debugfs_remove_files(rdev->debugfs[i].files,
+                                        rdev->debugfs[i].num_files,
+                                        rdev->ddev->control);
+               drm_debugfs_remove_files(rdev->debugfs[i].files,
+                                        rdev->debugfs[i].num_files,
+                                        rdev->ddev->primary);
+       }
+#endif
+}
+
 #if defined(CONFIG_DEBUG_FS)
 int radeon_debugfs_init(struct drm_minor *minor)
 {
@@ -1035,11 +1067,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
 
 void radeon_debugfs_cleanup(struct drm_minor *minor)
 {
-       unsigned i;
-
-       for (i = 0; i < _radeon_debugfs_count; i++) {
-               drm_debugfs_remove_files(_radeon_debugfs[i].files,
-                                        _radeon_debugfs[i].num_files, minor);
-       }
 }
 #endif
index a22d6e6a49a2649796a119c2b642a83659f8edd3..d3ffc18774a611df74a66ea136fbd12906eeaa63 100644 (file)
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        if (!ASIC_IS_AVIVO(rdev)) {
                /* crtc offset is from display base addr not FB location */
                base -= radeon_crtc->legacy_display_base_addr;
-               pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+               pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
 
                if (tiling_flags & RADEON_TILING_MACRO) {
                        if (ASIC_IS_R300(rdev)) {
@@ -1081,7 +1081,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
 void
 radeon_framebuffer_init(struct drm_device *dev,
                        struct radeon_framebuffer *rfb,
-                       struct drm_mode_fb_cmd *mode_cmd,
+                       struct drm_mode_fb_cmd2 *mode_cmd,
                        struct drm_gem_object *obj)
 {
        rfb->obj = obj;
@@ -1092,15 +1092,15 @@ radeon_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 radeon_user_framebuffer_create(struct drm_device *dev,
                               struct drm_file *file_priv,
-                              struct drm_mode_fb_cmd *mode_cmd)
+                              struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_gem_object *obj;
        struct radeon_framebuffer *radeon_fb;
 
-       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
        if (obj ==  NULL) {
                dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
-                       "can't create framebuffer\n", mode_cmd->handle);
+                       "can't create framebuffer\n", mode_cmd->handles[0]);
                return ERR_PTR(-ENOENT);
        }
 
index 71499fc3daf524f8b719692e9ff7aacb6219b2e9..31da622eef63c20e7dc66e821eff0cc5f43ec62d 100644 (file)
  *   2.10.0 - fusion 2D tiling
  *   2.11.0 - backend map, initial compute support for the CS checker
  *   2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ *   2.13.0 - virtual memory support
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       12
+#define KMS_DRIVER_MINOR       13
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+                               struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+                               struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      int *vpos, int *hpos);
 extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
+static const struct file_operations radeon_driver_old_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = radeon_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver_old = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = radeon_ioctls,
        .dma_ioctl = radeon_cp_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = radeon_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
-
+       .fops = &radeon_driver_old_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
        return radeon_resume_kms(dev);
 }
 
+static const struct file_operations radeon_driver_kms_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = radeon_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
 static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
        .ioctls = radeon_ioctls_kms,
        .gem_init_object = radeon_gem_object_init,
        .gem_free_object = radeon_gem_object_free,
+       .gem_open_object = radeon_gem_object_open,
+       .gem_close_object = radeon_gem_object_close,
        .dma_ioctl = radeon_dma_ioctl_kms,
        .dumb_create = radeon_mode_dumb_create,
        .dumb_map_offset = radeon_mode_dumb_mmap,
        .dumb_destroy = radeon_mode_dumb_destroy,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = radeon_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
-       },
-
+       .fops = &radeon_driver_kms_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 0b7b486c97e880e131fbde8afccec6a6a50538e1..cf2bf35b56b8efea936066b469d3211dbe607de1 100644 (file)
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 }
 
 static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
-                                        struct drm_mode_fb_cmd *mode_cmd,
+                                        struct drm_mode_fb_cmd2 *mode_cmd,
                                         struct drm_gem_object **gobj_p)
 {
        struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
+       u32 bpp, depth;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
        /* need to align pitch with crtc limits */
-       mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+       mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+                                                 fb_tiled) * ((bpp + 1) / 8);
 
        if (rdev->family >= CHIP_R600)
                height = ALIGN(mode_cmd->height, 8);
-       size = mode_cmd->pitch * height;
+       size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
                tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-       switch (mode_cmd->bpp) {
+       switch (bpp) {
        case 32:
                tiling_flags |= RADEON_TILING_SWAP_32BIT;
                break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        if (tiling_flags) {
                ret = radeon_bo_set_tiling_flags(rbo,
                                                 tiling_flags | RADEON_TILING_SURFACE,
-                                                mode_cmd->pitch);
+                                                mode_cmd->pitches[0]);
                if (ret)
                        dev_err(rdev->dev, "FB failed to set tiling flags\n");
        }
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        struct radeon_device *rdev = rfbdev->rdev;
        struct fb_info *info;
        struct drm_framebuffer *fb = NULL;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_gem_object *gobj = NULL;
        struct radeon_bo *rbo = NULL;
        struct device *device = &rdev->pdev->dev;
@@ -201,8 +205,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
                sizes->surface_bpp = 32;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
        ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
        rbo = gem_to_radeon_bo(gobj);
@@ -228,7 +232,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 
        strcpy(info->fix.id, "radeondrmfb");
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 
        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &radeonfb_ops;
@@ -271,7 +275,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
        DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
        DRM_INFO("fb depth is %d\n", fb->depth);
-       DRM_INFO("   pitch is %d\n", fb->pitch);
+       DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
        vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
        return 0;
index 76ec0e9ed8ae26bbe1ef4a049e9fb4523462199e..64ea3dd9e6ff2ab58ed36476693a9dde4cff6f5a 100644 (file)
 #include "radeon.h"
 #include "radeon_trace.h"
 
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        if (rdev->wb.enabled) {
-               u32 scratch_index;
-               if (rdev->wb.use_event)
-                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               else
-                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
-       } else
-               WREG32(rdev->fence_drv.scratch_reg, seq);
+               *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+       } else {
+               WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+       }
 }
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 {
-       u32 seq;
+       u32 seq = 0;
 
        if (rdev->wb.enabled) {
-               u32 scratch_index;
-               if (rdev->wb.use_event)
-                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               else
-                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-       } else
-               seq = RREG32(rdev->fence_drv.scratch_reg);
+               seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+       } else {
+               seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+       }
        return seq;
 }
 
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
        unsigned long irq_flags;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (fence->emited) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (fence->emitted) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-       if (!rdev->cp.ready)
+       fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+       if (!rdev->ring[fence->ring].ready)
                /* FIXME: cp is not running assume everythings is done right
                 * away
                 */
-               radeon_fence_write(rdev, fence->seq);
+               radeon_fence_write(rdev, fence->seq, fence->ring);
        else
-               radeon_fence_ring_emit(rdev, fence);
+               radeon_fence_ring_emit(rdev, fence->ring, fence);
 
        trace_radeon_fence_emit(rdev->ddev, fence->seq);
-       fence->emited = true;
-       list_move_tail(&fence->list, &rdev->fence_drv.emited);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       fence->emitted = true;
+       list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 {
        struct radeon_fence *fence;
        struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
        bool wake = false;
        unsigned long cjiffies;
 
-       seq = radeon_fence_read(rdev);
-       if (seq != rdev->fence_drv.last_seq) {
-               rdev->fence_drv.last_seq = seq;
-               rdev->fence_drv.last_jiffies = jiffies;
-               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+       seq = radeon_fence_read(rdev, ring);
+       if (seq != rdev->fence_drv[ring].last_seq) {
+               rdev->fence_drv[ring].last_seq = seq;
+               rdev->fence_drv[ring].last_jiffies = jiffies;
+               rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
        } else {
                cjiffies = jiffies;
-               if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-                       cjiffies -= rdev->fence_drv.last_jiffies;
-                       if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+               if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+                       cjiffies -= rdev->fence_drv[ring].last_jiffies;
+                       if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
                                /* update the timeout */
-                               rdev->fence_drv.last_timeout -= cjiffies;
+                               rdev->fence_drv[ring].last_timeout -= cjiffies;
                        } else {
                                /* the 500ms timeout is elapsed we should test
                                 * for GPU lockup
                                 */
-                               rdev->fence_drv.last_timeout = 1;
+                               rdev->fence_drv[ring].last_timeout = 1;
                        }
                } else {
                        /* wrap around update last jiffies, we will just wait
                         * a little longer
                         */
-                       rdev->fence_drv.last_jiffies = cjiffies;
+                       rdev->fence_drv[ring].last_jiffies = cjiffies;
                }
                return false;
        }
        n = NULL;
-       list_for_each(i, &rdev->fence_drv.emited) {
+       list_for_each(i, &rdev->fence_drv[ring].emitted) {
                fence = list_entry(i, struct radeon_fence, list);
                if (fence->seq == seq) {
                        n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                i = n;
                do {
                        n = i->prev;
-                       list_move_tail(i, &rdev->fence_drv.signaled);
+                       list_move_tail(i, &rdev->fence_drv[ring].signaled);
                        fence = list_entry(i, struct radeon_fence, list);
                        fence->signaled = true;
                        i = n;
-               } while (i != &rdev->fence_drv.emited);
+               } while (i != &rdev->fence_drv[ring].emitted);
                wake = true;
        }
        return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
         struct radeon_fence *fence;
 
        fence = container_of(kref, struct radeon_fence, kref);
-       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
        list_del(&fence->list);
-       fence->emited = false;
-       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       fence->emitted = false;
+       write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+       if (fence->semaphore)
+               radeon_semaphore_free(fence->rdev, fence->semaphore);
        kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+                       struct radeon_fence **fence,
+                       int ring)
 {
        unsigned long irq_flags;
 
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
        }
        kref_init(&((*fence)->kref));
        (*fence)->rdev = rdev;
-       (*fence)->emited = false;
+       (*fence)->emitted = false;
        (*fence)->signaled = false;
        (*fence)->seq = 0;
+       (*fence)->ring = ring;
+       (*fence)->semaphore = NULL;
        INIT_LIST_HEAD(&(*fence)->list);
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        return 0;
 }
 
-
 bool radeon_fence_signaled(struct radeon_fence *fence)
 {
        unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
        if (fence->rdev->gpu_lockup)
                return true;
 
-       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
        signaled = fence->signaled;
        /* if we are shuting down report all fence as signaled */
        if (fence->rdev->shutdown) {
                signaled = true;
        }
-       if (!fence->emited) {
-               WARN(1, "Querying an unemited fence : %p !\n", fence);
+       if (!fence->emitted) {
+               WARN(1, "Querying an unemitted fence : %p !\n", fence);
                signaled = true;
        }
        if (!signaled) {
-               radeon_fence_poll_locked(fence->rdev);
+               radeon_fence_poll_locked(fence->rdev, fence->ring);
                signaled = fence->signaled;
        }
-       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
        return signaled;
 }
 
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
        if (radeon_fence_signaled(fence)) {
                return 0;
        }
-       timeout = rdev->fence_drv.last_timeout;
+       timeout = rdev->fence_drv[fence->ring].last_timeout;
 retry:
        /* save current sequence used to check for GPU lockup */
-       seq = rdev->fence_drv.last_seq;
+       seq = rdev->fence_drv[fence->ring].last_seq;
        trace_radeon_fence_wait_begin(rdev->ddev, seq);
        if (intr) {
-               radeon_irq_kms_sw_irq_get(rdev);
-               r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+               radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+               r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
                                radeon_fence_signaled(fence), timeout);
-               radeon_irq_kms_sw_irq_put(rdev);
+               radeon_irq_kms_sw_irq_put(rdev, fence->ring);
                if (unlikely(r < 0)) {
                        return r;
                }
        } else {
-               radeon_irq_kms_sw_irq_get(rdev);
-               r = wait_event_timeout(rdev->fence_drv.queue,
+               radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+               r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
                         radeon_fence_signaled(fence), timeout);
-               radeon_irq_kms_sw_irq_put(rdev);
+               radeon_irq_kms_sw_irq_put(rdev, fence->ring);
        }
        trace_radeon_fence_wait_end(rdev->ddev, seq);
        if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
                        timeout = r;
                        goto retry;
                }
-               /* don't protect read access to rdev->fence_drv.last_seq
+               /* don't protect read access to rdev->fence_drv[t].last_seq
                 * if we experiencing a lockup the value doesn't change
                 */
-               if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+               if (seq == rdev->fence_drv[fence->ring].last_seq &&
+                   radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
                        /* good news we believe it's a lockup */
                        printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
                             fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
                        r = radeon_gpu_reset(rdev);
                        if (r)
                                return r;
-                       radeon_fence_write(rdev, fence->seq);
+                       radeon_fence_write(rdev, fence->seq, fence->ring);
                        rdev->gpu_lockup = false;
                }
                timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-               write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-               rdev->fence_drv.last_jiffies = jiffies;
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               write_lock_irqsave(&rdev->fence_lock, irq_flags);
+               rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+               rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                goto retry;
        }
        return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
        if (rdev->gpu_lockup) {
                return 0;
        }
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (list_empty(&rdev->fence_drv.emited)) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (list_empty(&rdev->fence_drv[ring].emitted)) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence = list_entry(rdev->fence_drv.emited.next,
+       fence = list_entry(rdev->fence_drv[ring].emitted.next,
                           struct radeon_fence, list);
        radeon_fence_ref(fence);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
        if (rdev->gpu_lockup) {
                return 0;
        }
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (list_empty(&rdev->fence_drv.emited)) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (list_empty(&rdev->fence_drv[ring].emitted)) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence = list_entry(rdev->fence_drv.emited.prev,
+       fence = list_entry(rdev->fence_drv[ring].emitted.prev,
                           struct radeon_fence, list);
        radeon_fence_ref(fence);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
@@ -347,39 +345,95 @@ void radeon_fence_unref(struct radeon_fence **fence)
        }
 }
 
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        bool wake;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       wake = radeon_fence_poll_locked(rdev);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       wake = radeon_fence_poll_locked(rdev, ring);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        if (wake) {
-               wake_up_all(&rdev->fence_drv.queue);
+               wake_up_all(&rdev->fence_drv[ring].queue);
        }
 }
 
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+       unsigned long irq_flags;
+       int not_processed = 0;
+
+       read_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (!rdev->fence_drv[ring].initialized)
+               return 0;
+
+       if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+               struct list_head *ptr;
+               list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+                       /* count up to 3, that's enough info */
+                       if (++not_processed >= 3)
+                               break;
+               }
+       }
+       read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+       return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
+       uint64_t index;
        int r;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-       if (r) {
-               dev_err(rdev->dev, "fence failed to get scratch register\n");
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-               return r;
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+       if (rdev->wb.use_event) {
+               rdev->fence_drv[ring].scratch_reg = 0;
+               index = R600_WB_EVENT_OFFSET + ring * 4;
+       } else {
+               r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+               if (r) {
+                       dev_err(rdev->dev, "fence failed to get scratch register\n");
+                       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+                       return r;
+               }
+               index = RADEON_WB_SCRATCH_OFFSET +
+                       rdev->fence_drv[ring].scratch_reg -
+                       rdev->scratch.reg_base;
        }
-       radeon_fence_write(rdev, 0);
-       atomic_set(&rdev->fence_drv.seq, 0);
-       INIT_LIST_HEAD(&rdev->fence_drv.created);
-       INIT_LIST_HEAD(&rdev->fence_drv.emited);
-       INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-       init_waitqueue_head(&rdev->fence_drv.queue);
-       rdev->fence_drv.initialized = true;
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+       rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+       radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+       rdev->fence_drv[ring].initialized = true;
+       DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+                ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+       return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+       rdev->fence_drv[ring].scratch_reg = -1;
+       rdev->fence_drv[ring].cpu_addr = NULL;
+       rdev->fence_drv[ring].gpu_addr = 0;
+       atomic_set(&rdev->fence_drv[ring].seq, 0);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+       init_waitqueue_head(&rdev->fence_drv[ring].queue);
+       rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       int ring;
+
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+               radeon_fence_driver_init_ring(rdev, ring);
+       }
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
@@ -389,14 +443,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
        unsigned long irq_flags;
-
-       if (!rdev->fence_drv.initialized)
-               return;
-       wake_up_all(&rdev->fence_drv.queue);
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-       rdev->fence_drv.initialized = false;
+       int ring;
+
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+               if (!rdev->fence_drv[ring].initialized)
+                       continue;
+               radeon_fence_wait_last(rdev, ring);
+               wake_up_all(&rdev->fence_drv[ring].queue);
+               write_lock_irqsave(&rdev->fence_lock, irq_flags);
+               radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+               rdev->fence_drv[ring].initialized = false;
+       }
 }
 
 
@@ -410,14 +468,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fence *fence;
-
-       seq_printf(m, "Last signaled fence 0x%08X\n",
-                  radeon_fence_read(rdev));
-       if (!list_empty(&rdev->fence_drv.emited)) {
-                  fence = list_entry(rdev->fence_drv.emited.prev,
-                                     struct radeon_fence, list);
-                  seq_printf(m, "Last emited fence %p with 0x%08X\n",
-                             fence,  fence->seq);
+       int i;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (!rdev->fence_drv[i].initialized)
+                       continue;
+
+               seq_printf(m, "--- ring %d ---\n", i);
+               seq_printf(m, "Last signaled fence 0x%08X\n",
+                          radeon_fence_read(rdev, i));
+               if (!list_empty(&rdev->fence_drv[i].emitted)) {
+                       fence = list_entry(rdev->fence_drv[i].emitted.prev,
+                                          struct radeon_fence, list);
+                       seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+                                  fence,  fence->seq);
+               }
        }
        return 0;
 }
index ba7ab79e12c1969793520a0ede951c675a52b469..010dad8b66ae98902f3b7c4ffb61d426103ef07f 100644 (file)
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
-                       if (!rdev->gart.ttm_alloced[p])
-                               pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                        rdev->gart.pages[p] = NULL;
                        rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
                        page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               /* we reverted the patch using dma_addr in TTM for now but this
-                * code stops building on alpha so just comment it out for now */
-               if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
-                       rdev->gart.ttm_alloced[p] = true;
-                       rdev->gart.pages_addr[p] = dma_addr[i];
-               } else {
-                       /* we need to support large memory configurations */
-                       /* assume that unbind have already been call on the range */
-                       rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
-                                                       0, PAGE_SIZE,
-                                                       PCI_DMA_BIDIRECTIONAL);
-                       if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-                               /* FIXME: failed to map page (return -ENOMEM?) */
-                               radeon_gart_unbind(rdev, offset, pages);
-                               return -ENOMEM;
-                       }
-               }
+               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
                if (rdev->gart.ptr) {
                        page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
-                                        rdev->gart.num_cpu_pages, GFP_KERNEL);
-       if (rdev->gart.ttm_alloced == NULL) {
-               radeon_gart_fini(rdev);
-               return -ENOMEM;
-       }
        /* set GART entry to point to the dummy page by default */
        for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
                rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
        rdev->gart.ready = false;
        kfree(rdev->gart.pages);
        kfree(rdev->gart.pages_addr);
-       kfree(rdev->gart.ttm_alloced);
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
-       rdev->gart.ttm_alloced = NULL;
 
        radeon_dummy_page_fini(rdev);
 }
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+       int r;
+
+       rdev->vm_manager.enabled = false;
+
+       /* mark first vm as always in use, it's the system one */
+       r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+                                     rdev->vm_manager.max_pfn * 8,
+                                     RADEON_GEM_DOMAIN_VRAM);
+       if (r) {
+               dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+                       (rdev->vm_manager.max_pfn * 8) >> 10);
+               return r;
+       }
+
+       r = rdev->vm_manager.funcs->init(rdev);
+       if (r == 0)
+               rdev->vm_manager.enabled = true;
+
+       return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+                                   struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va;
+
+       if (vm->id == -1) {
+               return;
+       }
+
+       /* wait for vm use to end */
+       if (vm->fence) {
+               radeon_fence_wait(vm->fence, false);
+               radeon_fence_unref(&vm->fence);
+       }
+
+       /* hw unbind */
+       rdev->vm_manager.funcs->unbind(rdev, vm);
+       rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+       list_del_init(&vm->list);
+       vm->id = -1;
+       radeon_sa_bo_free(rdev, &vm->sa_bo);
+       vm->pt = NULL;
+
+       list_for_each_entry(bo_va, &vm->va, vm_list) {
+               bo_va->valid = false;
+       }
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+       if (rdev->vm_manager.sa_manager.bo == NULL)
+               return;
+       radeon_vm_manager_suspend(rdev);
+       rdev->vm_manager.funcs->fini(rdev);
+       radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+       rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+       if (rdev->vm_manager.sa_manager.bo == NULL) {
+               return -EINVAL;
+       }
+       return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+       struct radeon_vm *vm, *tmp;
+
+       radeon_mutex_lock(&rdev->cs_mutex);
+       /* unbind all active vm */
+       list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+               radeon_vm_unbind_locked(rdev, vm);
+       }
+       rdev->vm_manager.funcs->fini(rdev);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+       return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       mutex_lock(&vm->mutex);
+       radeon_vm_unbind_locked(rdev, vm);
+       mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex must be locked & vm mutex must be locked */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       struct radeon_vm *vm_evict;
+       unsigned i;
+       int id = -1, r;
+
+       if (vm == NULL) {
+               return -EINVAL;
+       }
+
+       if (vm->id != -1) {
+               /* update lru */
+               list_del_init(&vm->list);
+               list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+               return 0;
+       }
+
+retry:
+       r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+                            RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+                            RADEON_GPU_PAGE_SIZE);
+       if (r) {
+               if (list_empty(&rdev->vm_manager.lru_vm)) {
+                       return r;
+               }
+               vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+               radeon_vm_unbind(rdev, vm_evict);
+               goto retry;
+       }
+       vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+       vm->pt += (vm->sa_bo.offset >> 3);
+       vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+       vm->pt_gpu_addr += vm->sa_bo.offset;
+       memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+       /* search for free vm */
+       for (i = 0; i < rdev->vm_manager.nvm; i++) {
+               if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+                       id = i;
+                       break;
+               }
+       }
+       /* evict vm if necessary */
+       if (id == -1) {
+               vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+               radeon_vm_unbind(rdev, vm_evict);
+               goto retry_id;
+       }
+
+       /* do hw bind */
+       r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+       if (r) {
+               radeon_sa_bo_free(rdev, &vm->sa_bo);
+               return r;
+       }
+       rdev->vm_manager.use_bitmap |= 1 << id;
+       vm->id = id;
+       list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+       return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+                                      &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo,
+                    uint64_t offset,
+                    uint32_t flags)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       struct list_head *head;
+       uint64_t size = radeon_bo_size(bo), last_offset = 0;
+       unsigned last_pfn;
+
+       bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+       if (bo_va == NULL) {
+               return -ENOMEM;
+       }
+       bo_va->vm = vm;
+       bo_va->bo = bo;
+       bo_va->soffset = offset;
+       bo_va->eoffset = offset + size;
+       bo_va->flags = flags;
+       bo_va->valid = false;
+       INIT_LIST_HEAD(&bo_va->bo_list);
+       INIT_LIST_HEAD(&bo_va->vm_list);
+       /* make sure the object fits at this offset */
+       if (bo_va->soffset >= bo_va->eoffset) {
+               kfree(bo_va);
+               return -EINVAL;
+       }
+
+       last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+       if (last_pfn > rdev->vm_manager.max_pfn) {
+               kfree(bo_va);
+               dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+                       last_pfn, rdev->vm_manager.max_pfn);
+               return -EINVAL;
+       }
+
+       mutex_lock(&vm->mutex);
+       if (last_pfn > vm->last_pfn) {
+               /* grow va space 32M by 32M */
+               unsigned align = ((32 << 20) >> 12) - 1;
+               radeon_mutex_lock(&rdev->cs_mutex);
+               radeon_vm_unbind_locked(rdev, vm);
+               radeon_mutex_unlock(&rdev->cs_mutex);
+               vm->last_pfn = (last_pfn + align) & ~align;
+       }
+       head = &vm->va;
+       last_offset = 0;
+       list_for_each_entry(tmp, &vm->va, vm_list) {
+               if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+                       /* bo can be added before this one */
+                       break;
+               }
+               if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+                       /* bo and tmp overlap, invalid offset */
+                       dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+                               bo, (unsigned)bo_va->soffset, tmp->bo,
+                               (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+                       kfree(bo_va);
+                       mutex_unlock(&vm->mutex);
+                       return -EINVAL;
+               }
+               last_offset = tmp->eoffset;
+               head = &tmp->vm_list;
+       }
+       list_add(&bo_va->vm_list, head);
+       list_add_tail(&bo_va->bo_list, &bo->va);
+       mutex_unlock(&vm->mutex);
+       return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+                             struct ttm_mem_reg *mem,
+                             unsigned pfn)
+{
+       u64 addr = 0;
+
+       switch (mem->mem_type) {
+       case TTM_PL_VRAM:
+               addr = (mem->start << PAGE_SHIFT);
+               addr += pfn * RADEON_GPU_PAGE_SIZE;
+               addr += rdev->vm_manager.vram_base_offset;
+               break;
+       case TTM_PL_TT:
+               /* offset inside page table */
+               addr = mem->start << PAGE_SHIFT;
+               addr += pfn * RADEON_GPU_PAGE_SIZE;
+               addr = addr >> PAGE_SHIFT;
+               /* page table offset */
+               addr = rdev->gart.pages_addr[addr];
+               /* in case cpu page size != gpu page size */
+               addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+               break;
+       default:
+               break;
+       }
+       return addr;
+}
+
+/* object has to be reserved & cs mutex taken & vm mutex taken */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+                           struct radeon_vm *vm,
+                           struct radeon_bo *bo,
+                           struct ttm_mem_reg *mem)
+{
+       struct radeon_bo_va *bo_va;
+       unsigned ngpu_pages, i;
+       uint64_t addr = 0, pfn;
+       uint32_t flags;
+
+       /* nothing to do if vm isn't bound */
+       if (vm->id == -1)
+               return 0;
+
+       bo_va = radeon_bo_va(bo, vm);
+       if (bo_va == NULL) {
+               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+               return -EINVAL;
+       }
+
+       if (bo_va->valid)
+               return 0;
+
+       ngpu_pages = radeon_bo_ngpu_pages(bo);
+       bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+       bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+       if (mem) {
+               if (mem->mem_type != TTM_PL_SYSTEM) {
+                       bo_va->flags |= RADEON_VM_PAGE_VALID;
+                       bo_va->valid = true;
+               }
+               if (mem->mem_type == TTM_PL_TT) {
+                       bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+               }
+       }
+       pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+       flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
+       for (i = 0, addr = 0; i < ngpu_pages; i++) {
+               if (mem && bo_va->valid) {
+                       addr = radeon_vm_get_addr(rdev, mem, i);
+               }
+               rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+       }
+       rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+       return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       bo_va = radeon_bo_va(bo, vm);
+       if (bo_va == NULL)
+               return 0;
+
+       list_del(&bo_va->bo_list);
+       mutex_lock(&vm->mutex);
+       radeon_mutex_lock(&rdev->cs_mutex);
+       radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+       list_del(&bo_va->vm_list);
+       mutex_unlock(&vm->mutex);
+
+       kfree(bo_va);
+       return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+                            struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       list_for_each_entry(bo_va, &bo->va, bo_list) {
+               bo_va->valid = false;
+       }
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       int r;
+
+       vm->id = -1;
+       vm->fence = NULL;
+       mutex_init(&vm->mutex);
+       INIT_LIST_HEAD(&vm->list);
+       INIT_LIST_HEAD(&vm->va);
+       vm->last_pfn = 0;
+       /* map the ib pool buffer at 0 in virtual address space, set
+        * read only
+        */
+       r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+                            RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+       return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       mutex_lock(&vm->mutex);
+
+       radeon_mutex_lock(&rdev->cs_mutex);
+       radeon_vm_unbind_locked(rdev, vm);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+
+       /* remove all bo */
+       r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+       if (!r) {
+               bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+               list_del_init(&bo_va->bo_list);
+               list_del_init(&bo_va->vm_list);
+               radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+               kfree(bo_va);
+       }
+       if (!list_empty(&vm->va)) {
+               dev_err(rdev->dev, "still active bo inside vm\n");
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+               list_del_init(&bo_va->vm_list);
+               r = radeon_bo_reserve(bo_va->bo, false);
+               if (!r) {
+                       list_del_init(&bo_va->bo_list);
+                       radeon_bo_unreserve(bo_va->bo);
+                       kfree(bo_va);
+               }
+       }
+       mutex_unlock(&vm->mutex);
+}
index aa1ca2dea42f1623f09c062e4c9171e220c75c0d..7337850af2fa86cff62cd1104c127d65dc056328 100644 (file)
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
        radeon_bo_force_delete(rdev);
 }
 
+/*
+ * Call from drm_gem_handle_create which appear in both new and open ioctl
+ * case.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+       return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+                            struct drm_file *file_priv)
+{
+       struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+       struct radeon_device *rdev = rbo->rdev;
+       struct radeon_fpriv *fpriv = file_priv->driver_priv;
+       struct radeon_vm *vm = &fpriv->vm;
+       struct radeon_bo_va *bo_va, *tmp;
+
+       if (rdev->family < CHIP_CAYMAN) {
+               return;
+       }
+
+       if (radeon_bo_reserve(rbo, false)) {
+               return;
+       }
+       list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+               if (bo_va->vm == vm) {
+                       /* remove from this vm address space */
+                       mutex_lock(&vm->mutex);
+                       list_del(&bo_va->vm_list);
+                       mutex_unlock(&vm->mutex);
+                       list_del(&bo_va->bo_list);
+                       kfree(bo_va);
+               }
+       }
+       radeon_bo_unreserve(rbo);
+}
+
 
 /*
  * GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
+       unsigned i;
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
-       args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
-               RADEON_IB_POOL_SIZE*64*1024;
+       args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               args->gart_size -= rdev->ring[i].ring_size;
        return 0;
 }
 
@@ -352,6 +392,109 @@ out:
        return r;
 }
 
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+       struct drm_radeon_gem_va *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_fpriv *fpriv = filp->driver_priv;
+       struct radeon_bo *rbo;
+       struct radeon_bo_va *bo_va;
+       u32 invalid_flags;
+       int r = 0;
+
+       if (!rdev->vm_manager.enabled) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -ENOTTY;
+       }
+
+       /* !! DON'T REMOVE !!
+        * We don't support vm_id yet; to be sure we don't have broken
+        * userspace, reject anyone trying to use a non-0 value, so that moving
+        * forward we can use those fields without breaking existing userspace
+        */
+       if (args->vm_id) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       if (args->offset < RADEON_VA_RESERVED_SIZE) {
+               dev_err(&dev->pdev->dev,
+                       "offset 0x%lX is in reserved area 0x%X\n",
+                       (unsigned long)args->offset,
+                       RADEON_VA_RESERVED_SIZE);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       /* don't remove; we need to force userspace to set the snooped flag,
+        * otherwise we will end up with broken userspace and we won't be able
+        * to enable this feature without adding a new interface
+        */
+       invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+       if ((args->flags & invalid_flags)) {
+               dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+                       args->flags, invalid_flags);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+       if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+               dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       switch (args->operation) {
+       case RADEON_VA_MAP:
+       case RADEON_VA_UNMAP:
+               break;
+       default:
+               dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+                       args->operation);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -ENOENT;
+       }
+       rbo = gem_to_radeon_bo(gobj);
+       r = radeon_bo_reserve(rbo, false);
+       if (r) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               drm_gem_object_unreference_unlocked(gobj);
+               return r;
+       }
+       switch (args->operation) {
+       case RADEON_VA_MAP:
+               bo_va = radeon_bo_va(rbo, &fpriv->vm);
+               if (bo_va) {
+                       args->operation = RADEON_VA_RESULT_VA_EXIST;
+                       args->offset = bo_va->soffset;
+                       goto out;
+               }
+               r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+                                    args->offset, args->flags);
+               break;
+       case RADEON_VA_UNMAP:
+               r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+               break;
+       default:
+               break;
+       }
+       args->operation = RADEON_VA_RESULT_OK;
+       if (r) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+       }
+out:
+       radeon_bo_unreserve(rbo);
+       drm_gem_object_unreference_unlocked(gobj);
+       return r;
+}
+
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
index 8f86aeb266935ef46be2e5dff9022b91c04324c2..be38921bf761a570229afad2109b5aeb32127f29 100644 (file)
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
        unsigned i;
 
        /* Disable *all* interrupts */
-       rdev->irq.sw_int = false;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = false;
        rdev->irq.gui_idle = false;
        for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
                rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
+       unsigned i;
 
        dev->max_vblank_count = 0x001fffff;
-       rdev->irq.sw_int = true;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = true;
        radeon_irq_set(rdev);
        return 0;
 }
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
                return;
        }
        /* Disable *all* interrupts */
-       rdev->irq.sw_int = false;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = false;
        rdev->irq.gui_idle = false;
        for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
                rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
        flush_work_sync(&rdev->hotplug_work);
 }
 
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
 {
        unsigned long irqflags;
 
        spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-       if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
-               rdev->irq.sw_int = true;
+       if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+               rdev->irq.sw_int[ring] = true;
                radeon_irq_set(rdev);
        }
        spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
        unsigned long irqflags;
 
        spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-       BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
-       if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
-               rdev->irq.sw_int = false;
+       BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+       if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+               rdev->irq.sw_int[ring] = false;
                radeon_irq_set(rdev);
        }
        spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
index be2c1224e68ae073f36a971f531d052a8bf2a9e5..d3352889a8709e79650647bb0dc5b2fd31ebf79f 100644 (file)
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_VA_START:
+               /* this is where we report if vm is supported or not */
+               if (rdev->family < CHIP_CAYMAN)
+                       return -EINVAL;
+               value = RADEON_VA_RESERVED_SIZE;
+               break;
+       case RADEON_INFO_IB_VM_MAX_SIZE:
+               /* this is where we report if vm is supported or not */
+               if (rdev->family < CHIP_CAYMAN)
+                       return -EINVAL;
+               value = RADEON_IB_VM_MAX_SIZE;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
        return 0;
 }
 
-
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
        vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
 
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       file_priv->driver_priv = NULL;
+
+       /* new gpu have virtual address space support */
+       if (rdev->family >= CHIP_CAYMAN) {
+               struct radeon_fpriv *fpriv;
+               int r;
+
+               fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+               if (unlikely(!fpriv)) {
+                       return -ENOMEM;
+               }
+
+               r = radeon_vm_init(rdev, &fpriv->vm);
+               if (r) {
+                       radeon_vm_fini(rdev, &fpriv->vm);
+                       kfree(fpriv);
+                       return r;
+               }
+
+               file_priv->driver_priv = fpriv;
+       }
        return 0;
 }
 
 void radeon_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* new gpu have virtual address space support */
+       if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+               struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+               radeon_vm_fini(rdev, &fpriv->vm);
+               kfree(fpriv);
+               file_priv->driver_priv = NULL;
+       }
 }
 
 void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
index daadf2111040875d93df8fef9752428dd35f1cae..25a19c483075650725d8bdb7a6af68ff993f1a5d 100644 (file)
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
        crtc_offset_cntl = 0;
 
-       pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
                        ((target_fb->bits_per_pixel * 8) - 1)) /
                       (target_fb->bits_per_pixel * 8));
index 2c2e75ef8a372e1011a37c781e857d8d68c9db50..08ff857c8fd6609e9530b742470c743bde1e5c73 100644 (file)
@@ -643,7 +643,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green
                                     u16 *blue, int regno);
 void radeon_framebuffer_init(struct drm_device *dev,
                             struct radeon_framebuffer *rfb,
-                            struct drm_mode_fb_cmd *mode_cmd,
+                            struct drm_mode_fb_cmd2 *mode_cmd,
                             struct drm_gem_object *obj);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
index 1c851521f458dfd515033818a2046e657272e4c0..d45df17635988307ce3aa34cdf9023fe95008e92 100644 (file)
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+
+       list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+               /* remove from all vm address space */
+               mutex_lock(&bo_va->vm->mutex);
+               list_del(&bo_va->vm_list);
+               mutex_unlock(&bo_va->vm->mutex);
+               list_del(&bo_va->bo_list);
+               kfree(bo_va);
+       }
+}
+
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
+       radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
 }
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        unsigned long max_size = 0;
+       size_t acc_size;
        int r;
 
        size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
                return -ENOMEM;
        }
 
+       acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+                                      sizeof(struct radeon_bo));
+
 retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
+       INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, page_align, 0, !kernel, NULL, size,
-                       &radeon_ttm_bo_destroy);
+                       &bo->placement, page_align, 0, !kernel, NULL,
+                       acc_size, &radeon_ttm_bo_destroy);
        mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
+       radeon_vm_bo_invalidate(rbo->rdev, rbo);
 }
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
        }
        return 0;
 }
+
+/* object have to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va;
+
+       list_for_each_entry(bo_va, &rbo->va, bo_list) {
+               if (bo_va->vm == vm) {
+                       return bo_va;
+               }
+       }
+       return NULL;
+}
index b07f0f9b86277a76bc1b9386bec73e1b53e1b5fb..cde430308870f75247aa5901320731a0a917ceb2 100644 (file)
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
        return !!atomic_read(&bo->tbo.reserved);
 }
 
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+       return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+       return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
 /**
  * radeon_bo_mmap_offset - return mmap offset of bo
  * @bo:        radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                        struct ttm_mem_reg *mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
+                                        struct radeon_vm *vm);
+
+/*
+ * sub allocation
+ */
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+                                    struct radeon_sa_manager *sa_manager,
+                                    unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+                                     struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+                                     struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+                                       struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+                           struct radeon_sa_manager *sa_manager,
+                           struct radeon_sa_bo *sa_bo,
+                           unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+                             struct radeon_sa_bo *sa_bo);
+
 #endif
index 78a665bd95198483862ba0c124453a1d14a87d48..095148e29a1f5cf494d8903ff36b7031af772cd3 100644 (file)
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
-       mutex_lock(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (rdev->ring[i].ring_obj)
+                       mutex_lock(&rdev->ring[i].mutex);
+       }
 
        /* gui idle int has issues on older chips it seems */
        if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
                        radeon_irq_set(rdev);
                }
        } else {
-               if (rdev->cp.ready) {
+               struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+               if (ring->ready) {
                        struct radeon_fence *fence;
-                       radeon_ring_alloc(rdev, 64);
-                       radeon_fence_create(rdev, &fence);
+                       radeon_ring_alloc(rdev, ring, 64);
+                       radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
                        radeon_fence_emit(rdev, fence);
-                       radeon_ring_commit(rdev);
+                       radeon_ring_commit(rdev, ring);
                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);
                }
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
-       mutex_unlock(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (rdev->ring[i].ring_obj)
+                       mutex_unlock(&rdev->ring[i].mutex);
+       }
        mutex_unlock(&rdev->vram_mutex);
        mutex_unlock(&rdev->ddev->struct_mutex);
 }
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
-               unsigned long irq_flags;
                int not_processed = 0;
+               int i;
 
-               read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-               if (!list_empty(&rdev->fence_drv.emited)) {
-                       struct list_head *ptr;
-                       list_for_each(ptr, &rdev->fence_drv.emited) {
-                               /* count up to 3, that's enought info */
-                               if (++not_processed >= 3)
-                                       break;
-                       }
+               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                       not_processed += radeon_fence_count_emitted(rdev, i);
+                       if (not_processed >= 3)
+                               break;
                }
-               read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
                if (not_processed >= 3) { /* should upclock */
                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
index 49d58202202c8dc4cf3808e7b48c4c13ac620e6b..e8bc70933d1b342a9bd3b9d04bbcbfc14c5438dc 100644 (file)
@@ -34,6 +34,7 @@
 #include "atom.h"
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev);
+int radeon_debugfs_ring_init(struct radeon_device *rdev);
 
 u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
        return idx_value;
 }
 
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
 #if DRM_DEBUG_CODE
-       if (rdev->cp.count_dw <= 0) {
+       if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writting more dword to ring than expected !\n");
        }
 #endif
-       rdev->cp.ring[rdev->cp.wptr++] = v;
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
-       rdev->cp.count_dw--;
-       rdev->cp.ring_free_dw--;
+       ring->ring[ring->wptr++] = v;
+       ring->wptr &= ring->ptr_mask;
+       ring->count_dw--;
+       ring->ring_free_dw--;
 }
 
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
-       struct radeon_ib *ib, *n;
-
-       list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
-               list_del(&ib->list);
-               vfree(ib->ptr);
-               kfree(ib);
-       }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ib *bib;
-
-       bib = kmalloc(sizeof(*bib), GFP_KERNEL);
-       if (bib == NULL)
-               return;
-       bib->ptr = vmalloc(ib->length_dw * 4);
-       if (bib->ptr == NULL) {
-               kfree(bib);
-               return;
+       bool done = false;
+
+       /* only free ib which have been emited */
+       if (ib->fence && ib->fence->emitted) {
+               if (radeon_fence_signaled(ib->fence)) {
+                       radeon_fence_unref(&ib->fence);
+                       radeon_sa_bo_free(rdev, &ib->sa_bo);
+                       done = true;
+               }
        }
-       memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
-       bib->length_dw = ib->length_dw;
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
-       mutex_unlock(&rdev->ib_pool.mutex);
+       return done;
 }
 
-/*
- * IB.
- */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+                 struct radeon_ib **ib, unsigned size)
 {
        struct radeon_fence *fence;
-       struct radeon_ib *nib;
-       int r = 0, i, c;
+       unsigned cretry = 0;
+       int r = 0, i, idx;
 
        *ib = NULL;
-       r = radeon_fence_create(rdev, &fence);
+       /* align size on 256 bytes */
+       size = ALIGN(size, 256);
+
+       r = radeon_fence_create(rdev, &fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
+
        mutex_lock(&rdev->ib_pool.mutex);
-       for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
-               i &= (RADEON_IB_POOL_SIZE - 1);
-               if (rdev->ib_pool.ibs[i].free) {
-                       nib = &rdev->ib_pool.ibs[i];
-                       break;
-               }
-       }
-       if (nib == NULL) {
-               /* This should never happen, it means we allocated all
-                * IB and haven't scheduled one yet, return EBUSY to
-                * userspace hoping that on ioctl recall we get better
-                * luck
-                */
-               dev_err(rdev->dev, "no free indirect buffer !\n");
+       idx = rdev->ib_pool.head_id;
+retry:
+       if (cretry > 5) {
+               dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
-               return -EBUSY;
+               return -ENOMEM;
        }
-       rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
-       nib->free = false;
-       if (nib->fence) {
-               mutex_unlock(&rdev->ib_pool.mutex);
-               r = radeon_fence_wait(nib->fence, false);
-               if (r) {
-                       dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
-                               nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
-                       mutex_lock(&rdev->ib_pool.mutex);
-                       nib->free = true;
-                       mutex_unlock(&rdev->ib_pool.mutex);
-                       radeon_fence_unref(&fence);
-                       return r;
+       cretry++;
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+               if (rdev->ib_pool.ibs[idx].fence == NULL) {
+                       r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+                                            &rdev->ib_pool.ibs[idx].sa_bo,
+                                            size, 256);
+                       if (!r) {
+                               *ib = &rdev->ib_pool.ibs[idx];
+                               (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+                               (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+                               (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+                               (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+                               (*ib)->fence = fence;
+                               (*ib)->vm_id = 0;
+                               /* ib are most likely to be allocated in a ring fashion
+                                * thus rdev->ib_pool.head_id should be the id of the
+                                * oldest ib
+                                */
+                               rdev->ib_pool.head_id = (1 + idx);
+                               rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+                               mutex_unlock(&rdev->ib_pool.mutex);
+                               return 0;
+                       }
                }
-               mutex_lock(&rdev->ib_pool.mutex);
+               idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+       }
+       /* this should be rare event, ie all ib scheduled none signaled yet.
+        */
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
+                       r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+                       if (!r) {
+                               goto retry;
+                       }
+                       /* an error happened */
+                       break;
+               }
+               idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
-       radeon_fence_unref(&nib->fence);
-       nib->fence = fence;
-       nib->length_dw = 0;
        mutex_unlock(&rdev->ib_pool.mutex);
-       *ib = nib;
-       return 0;
+       radeon_fence_unref(&fence);
+       return r;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,255 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        if (tmp == NULL) {
                return;
        }
-       if (!tmp->fence->emited)
-               radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
-       tmp->free = true;
+       if (tmp->fence && !tmp->fence->emitted) {
+               radeon_sa_bo_free(rdev, &tmp->sa_bo);
+               radeon_fence_unref(&tmp->fence);
+       }
        mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        int r = 0;
 
-       if (!ib->length_dw || !rdev->cp.ready) {
+       if (!ib->length_dw || !ring->ready) {
                /* TODO: Nothings in the ib we should report. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }
 
        /* 64 dwords should be enough for fence too */
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
-       radeon_ring_ib_execute(rdev, ib);
+       radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
        radeon_fence_emit(rdev, ib->fence);
-       mutex_lock(&rdev->ib_pool.mutex);
-       /* once scheduled IB is considered free and protected by the fence */
-       ib->free = true;
-       mutex_unlock(&rdev->ib_pool.mutex);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return 0;
 }
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-       void *ptr;
-       uint64_t gpu_addr;
-       int i;
-       int r = 0;
+       int i, r;
 
-       if (rdev->ib_pool.robj)
+       mutex_lock(&rdev->ib_pool.mutex);
+       if (rdev->ib_pool.ready) {
+               mutex_unlock(&rdev->ib_pool.mutex);
                return 0;
-       INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
-       /* Allocate 1M object buffer */
-       r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
-                            PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
-                            &rdev->ib_pool.robj);
-       if (r) {
-               DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
-               return r;
        }
-       r = radeon_bo_reserve(rdev->ib_pool.robj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
-       if (r) {
-               radeon_bo_unreserve(rdev->ib_pool.robj);
-               DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
-       radeon_bo_unreserve(rdev->ib_pool.robj);
+
+       r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+                                     RADEON_IB_POOL_SIZE*64*1024,
+                                     RADEON_GEM_DOMAIN_GTT);
        if (r) {
-               DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+               mutex_unlock(&rdev->ib_pool.mutex);
                return r;
        }
-       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-               unsigned offset;
 
-               offset = i * 64 * 1024;
-               rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
-               rdev->ib_pool.ibs[i].ptr = ptr + offset;
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
-               rdev->ib_pool.ibs[i].free = true;
+               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
+
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB !\n");
        }
-       return r;
+       if (radeon_debugfs_ring_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for rings !\n");
+       }
+       mutex_unlock(&rdev->ib_pool.mutex);
+       return 0;
 }
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
-       int r;
-       struct radeon_bo *robj;
+       unsigned i;
 
-       if (!rdev->ib_pool.ready) {
-               return;
-       }
        mutex_lock(&rdev->ib_pool.mutex);
-       radeon_ib_bogus_cleanup(rdev);
-       robj = rdev->ib_pool.robj;
-       rdev->ib_pool.robj = NULL;
-       mutex_unlock(&rdev->ib_pool.mutex);
-
-       if (robj) {
-               r = radeon_bo_reserve(robj, false);
-               if (likely(r == 0)) {
-                       radeon_bo_kunmap(robj);
-                       radeon_bo_unpin(robj);
-                       radeon_bo_unreserve(robj);
+       if (rdev->ib_pool.ready) {
+               for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+                       radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+                       radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
                }
-               radeon_bo_unref(&robj);
+               radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+               rdev->ib_pool.ready = false;
        }
+       mutex_unlock(&rdev->ib_pool.mutex);
 }
 
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+       return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+}
+
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+       return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
 
 /*
  * Ring.
  */
-void radeon_ring_free_size(struct radeon_device *rdev)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       if (rdev->wb.enabled)
-               rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
-       else {
-               if (rdev->family >= CHIP_R600)
-                       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-               else
-                       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       /* r1xx-r5xx only has CP ring */
+       if (rdev->family < CHIP_R600)
+               return RADEON_RING_TYPE_GFX_INDEX;
+
+       if (rdev->family >= CHIP_CAYMAN) {
+               if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
+                       return CAYMAN_RING_TYPE_CP1_INDEX;
+               else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
+                       return CAYMAN_RING_TYPE_CP2_INDEX;
        }
+       return RADEON_RING_TYPE_GFX_INDEX;
+}
+
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+       else
+               rptr = RREG32(ring->rptr_reg);
+       ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
-       rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
-       rdev->cp.ring_free_dw -= rdev->cp.wptr;
-       rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
-       if (!rdev->cp.ring_free_dw) {
-               rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+       ring->ring_free_dw -= ring->wptr;
+       ring->ring_free_dw &= ring->ptr_mask;
+       if (!ring->ring_free_dw) {
+               ring->ring_free_dw = ring->ring_size / 4;
        }
 }
 
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
        int r;
 
        /* Align requested size with padding so unlock_commit can
         * pad safely */
-       ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-       while (ndw > (rdev->cp.ring_free_dw - 1)) {
-               radeon_ring_free_size(rdev);
-               if (ndw < rdev->cp.ring_free_dw) {
+       ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+       while (ndw > (ring->ring_free_dw - 1)) {
+               radeon_ring_free_size(rdev, ring);
+               if (ndw < ring->ring_free_dw) {
                        break;
                }
-               r = radeon_fence_wait_next(rdev);
+               r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
                if (r)
                        return r;
        }
-       rdev->cp.count_dw = ndw;
-       rdev->cp.wptr_old = rdev->cp.wptr;
+       ring->count_dw = ndw;
+       ring->wptr_old = ring->wptr;
        return 0;
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
        int r;
 
-       mutex_lock(&rdev->cp.mutex);
-       r = radeon_ring_alloc(rdev, ndw);
+       mutex_lock(&ring->mutex);
+       r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
-               mutex_unlock(&rdev->cp.mutex);
+               mutex_unlock(&ring->mutex);
                return r;
        }
        return 0;
 }
 
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        unsigned count_dw_pad;
        unsigned i;
 
        /* We pad to match fetch size */
-       count_dw_pad = (rdev->cp.align_mask + 1) -
-                      (rdev->cp.wptr & rdev->cp.align_mask);
+       count_dw_pad = (ring->align_mask + 1) -
+                      (ring->wptr & ring->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
-               radeon_ring_write(rdev, 2 << 30);
+               radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();
-       radeon_cp_commit(rdev);
+       WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+       (void)RREG32(ring->wptr_reg);
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       radeon_ring_commit(rdev);
-       mutex_unlock(&rdev->cp.mutex);
+       radeon_ring_commit(rdev, ring);
+       mutex_unlock(&ring->mutex);
 }
 
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       rdev->cp.wptr = rdev->cp.wptr_old;
-       mutex_unlock(&rdev->cp.mutex);
+       ring->wptr = ring->wptr_old;
+       mutex_unlock(&ring->mutex);
 }
 
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+                    u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
 {
        int r;
 
-       rdev->cp.ring_size = ring_size;
+       ring->ring_size = ring_size;
+       ring->rptr_offs = rptr_offs;
+       ring->rptr_reg = rptr_reg;
+       ring->wptr_reg = wptr_reg;
+       ring->ptr_reg_shift = ptr_reg_shift;
+       ring->ptr_reg_mask = ptr_reg_mask;
+       ring->nop = nop;
        /* Allocate ring buffer */
-       if (rdev->cp.ring_obj == NULL) {
-               r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+       if (ring->ring_obj == NULL) {
+               r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                        RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->cp.ring_obj);
+                                       &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
-               r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+               r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
-               r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->cp.gpu_addr);
+               r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+                                       &ring->gpu_addr);
                if (r) {
-                       radeon_bo_unreserve(rdev->cp.ring_obj);
+                       radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
-               r = radeon_bo_kmap(rdev->cp.ring_obj,
-                                      (void **)&rdev->cp.ring);
-               radeon_bo_unreserve(rdev->cp.ring_obj);
+               r = radeon_bo_kmap(ring->ring_obj,
+                                      (void **)&ring->ring);
+               radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
-       rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-       rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       ring->ptr_mask = (ring->ring_size / 4) - 1;
+       ring->ring_free_dw = ring->ring_size / 4;
        return 0;
 }
 
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        int r;
        struct radeon_bo *ring_obj;
 
-       mutex_lock(&rdev->cp.mutex);
-       ring_obj = rdev->cp.ring_obj;
-       rdev->cp.ring = NULL;
-       rdev->cp.ring_obj = NULL;
-       mutex_unlock(&rdev->cp.mutex);
+       mutex_lock(&ring->mutex);
+       ring_obj = ring->ring_obj;
+       ring->ring = NULL;
+       ring->ring_obj = NULL;
+       mutex_unlock(&ring->mutex);
 
        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +432,83 @@ void radeon_ring_fini(struct radeon_device *rdev)
        }
 }
 
-
 /*
  * Debugfs info
  */
 #if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct radeon_ib *ib = node->info_ent->data;
-       unsigned i;
-
-       if (ib == NULL) {
-               return 0;
-       }
-       seq_printf(m, "IB %04u\n", ib->idx);
-       seq_printf(m, "IB fence %p\n", ib->fence);
-       seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
-       for (i = 0; i < ib->length_dw; i++) {
-               seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int ridx = *(int*)node->info_ent->data;
+       struct radeon_ring *ring = &rdev->ring[ridx];
+       unsigned count, i, j;
+
+       radeon_ring_free_size(rdev, ring);
+       count = (ring->ring_size / 4) - ring->ring_free_dw;
+       seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
+       seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+       seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
+       seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+       seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+       seq_printf(m, "%u dwords in ring\n", count);
+       i = ring->rptr;
+       for (j = 0; j <= count; j++) {
+               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+               i = (i + 1) & ring->ptr_mask;
        }
        return 0;
 }
 
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+       {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+       {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+       {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+};
+
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct radeon_device *rdev = node->info_ent->data;
-       struct radeon_ib *ib;
+       struct radeon_ib *ib = node->info_ent->data;
        unsigned i;
 
-       mutex_lock(&rdev->ib_pool.mutex);
-       if (list_empty(&rdev->ib_pool.bogus_ib)) {
-               mutex_unlock(&rdev->ib_pool.mutex);
-               seq_printf(m, "no bogus IB recorded\n");
+       if (ib == NULL) {
                return 0;
        }
-       ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
-       list_del_init(&ib->list);
-       mutex_unlock(&rdev->ib_pool.mutex);
+       seq_printf(m, "IB %04u\n", ib->idx);
+       seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
-       vfree(ib->ptr);
-       kfree(ib);
        return 0;
 }
 
 static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
 static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
 
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
-       {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
+int radeon_debugfs_ring_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
+                                       ARRAY_SIZE(radeon_debugfs_ring_info_list));
+#else
+       return 0;
 #endif
+}
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        unsigned i;
-       int r;
 
-       radeon_debugfs_ib_bogus_info_list[0].data = rdev;
-       r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
-       if (r)
-               return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644 (file)
index 0000000..4cce47e
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+                             struct radeon_sa_manager *sa_manager,
+                             unsigned size, u32 domain)
+{
+       int r;
+
+       sa_manager->bo = NULL;
+       sa_manager->size = size;
+       sa_manager->domain = domain;
+       INIT_LIST_HEAD(&sa_manager->sa_bo);
+
+       r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+                            RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+               return r;
+       }
+
+       return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+                              struct radeon_sa_manager *sa_manager)
+{
+       struct radeon_sa_bo *sa_bo, *tmp;
+
+       if (!list_empty(&sa_manager->sa_bo)) {
+               dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+       }
+       list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
+               list_del_init(&sa_bo->list);
+       }
+       radeon_bo_unref(&sa_manager->bo);
+       sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+                              struct radeon_sa_manager *sa_manager)
+{
+       int r;
+
+       if (sa_manager->bo == NULL) {
+               dev_err(rdev->dev, "no bo for sa manager\n");
+               return -EINVAL;
+       }
+
+       /* map the buffer */
+       r = radeon_bo_reserve(sa_manager->bo, false);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+               return r;
+       }
+       r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(sa_manager->bo);
+               dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+               return r;
+       }
+       r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+       radeon_bo_unreserve(sa_manager->bo);
+       return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+                                struct radeon_sa_manager *sa_manager)
+{
+       int r;
+
+       if (sa_manager->bo == NULL) {
+               dev_err(rdev->dev, "no bo for sa manager\n");
+               return -EINVAL;
+       }
+
+       r = radeon_bo_reserve(sa_manager->bo, false);
+       if (!r) {
+               radeon_bo_kunmap(sa_manager->bo);
+               radeon_bo_unpin(sa_manager->bo);
+               radeon_bo_unreserve(sa_manager->bo);
+       }
+       return r;
+}
+
+/*
+ * The principle is simple: we keep a list of sub allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end: total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we walk the existing
+ * sub objects looking for a hole in front of one that is at least
+ * alloc_size bytes; the new object is then placed in that hole.
+ *
+ * Alignment can't be bigger than page size
+ */
+/*
+ * radeon_sa_bo_new - sub-allocate a range from the manager's buffer
+ *
+ * Fills @sa_bo with an offset/size inside @sa_manager's backing bo and
+ * links it into the offset-ordered sa_bo list.  Returns 0 on success,
+ * -ENOMEM when no hole (or tail space) of @size bytes is available.
+ *
+ * NOTE(review): @align is used as a modulus below; the BUG_ON only
+ * bounds it against RADEON_GPU_PAGE_SIZE, so align == 0 would divide
+ * by zero — presumably callers always pass a power of two >= 1; verify.
+ */
+int radeon_sa_bo_new(struct radeon_device *rdev,
+                    struct radeon_sa_manager *sa_manager,
+                    struct radeon_sa_bo *sa_bo,
+                    unsigned size, unsigned align)
+{
+       struct radeon_sa_bo *tmp;
+       struct list_head *head;
+       unsigned offset = 0, wasted = 0;
+
+       BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+       BUG_ON(size > sa_manager->size);
+
+       /* no one ? */
+       /* empty list: allocate at offset 0, insert after the list head */
+       head = sa_manager->sa_bo.prev;
+       if (list_empty(&sa_manager->sa_bo)) {
+               goto out;
+       }
+
+       /* look for a hole big enough */
+       offset = 0;
+       list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+               /* room before this object ? */
+               if ((tmp->offset - offset) >= size) {
+                       head = tmp->list.prev;
+                       goto out;
+               }
+               /* advance past this object, rounding up to @align */
+               offset = tmp->offset + tmp->size;
+               wasted = offset % align;
+               if (wasted) {
+                       wasted = align - wasted;
+               }
+               offset += wasted;
+       }
+       /* room at the end ? */
+       head = sa_manager->sa_bo.prev;
+       tmp = list_entry(head, struct radeon_sa_bo, list);
+       offset = tmp->offset + tmp->size;
+       wasted = offset % align;
+       if (wasted) {
+               wasted = align - wasted;
+       }
+       offset += wasted;
+       if ((sa_manager->size - offset) < size) {
+               /* failed to find somethings big enough */
+               return -ENOMEM;
+       }
+
+out:
+       /* record the allocation and keep the list sorted by offset */
+       sa_bo->manager = sa_manager;
+       sa_bo->offset = offset;
+       sa_bo->size = size;
+       list_add(&sa_bo->list, head);
+       return 0;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+       list_del_init(&sa_bo->list);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644 (file)
index 0000000..61dd4e3
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+/*
+ * radeon_semaphore_add_bo - grow the semaphore pool by one backing bo
+ *
+ * Allocates a radeon_semaphore_bo, backs it with an IB of
+ * RADEON_SEMAPHORE_BO_SIZE bytes from the IB pool's sub-allocator, and
+ * carves that space into 8-byte semaphores (gpu_addr advances by 8,
+ * cpu_ptr by two u32s per entry), all placed on the bo's free list.
+ * The new bo is appended to rdev->semaphore_drv.bo under the driver
+ * rwlock.  Returns 0 or a negative errno from allocation/IB failure.
+ */
+static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo;
+       unsigned long irq_flags;
+       uint64_t gpu_addr;
+       uint32_t *cpu_ptr;
+       int r, i;
+
+
+       bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
+       if (bo == NULL) {
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&bo->free);
+       INIT_LIST_HEAD(&bo->list);
+       bo->nused = 0;
+
+       r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+       if (r) {
+               dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
+               kfree(bo);
+               return r;
+       }
+       /* GPU/CPU addresses of this bo's slice inside the IB pool buffer */
+       gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+       gpu_addr += bo->ib->sa_bo.offset;
+       cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+       cpu_ptr += (bo->ib->sa_bo.offset >> 2);
+       for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
+               bo->semaphores[i].gpu_addr = gpu_addr;
+               bo->semaphores[i].cpu_ptr = cpu_ptr;
+               bo->semaphores[i].bo = bo;
+               list_add_tail(&bo->semaphores[i].list, &bo->free);
+               gpu_addr += 8;
+               cpu_ptr += 2;
+       }
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+       return 0;
+}
+
+static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
+                                          struct radeon_semaphore_bo *bo)
+{
+       radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+       radeon_fence_unref(&bo->ib->fence);
+       list_del(&bo->list);
+       kfree(bo);
+}
+
+void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo, *n;
+
+       if (list_empty(&rdev->semaphore_drv.bo)) {
+               return;
+       }
+       /* only shrink if first bo has free semaphore */
+       bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
+       if (list_empty(&bo->free)) {
+               return;
+       }
+       list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
+               if (bo->nused)
+                       continue;
+               radeon_semaphore_del_bo_locked(rdev, bo);
+       }
+}
+
+/*
+ * radeon_semaphore_create - grab a free semaphore from the pool
+ *
+ * Scans the driver's semaphore bos for a free entry, zeroes its two
+ * 32-bit words, and hands it out.  If none is free, grows the pool
+ * once via radeon_semaphore_add_bo() and retries a single time
+ * (do_retry), then gives up with -ENOMEM.  The list walk is done
+ * under the semaphore_drv rwlock; add_bo is called outside it.
+ */
+int radeon_semaphore_create(struct radeon_device *rdev,
+                           struct radeon_semaphore **semaphore)
+{
+       struct radeon_semaphore_bo *bo;
+       unsigned long irq_flags;
+       bool do_retry = true;
+       int r;
+
+retry:
+       *semaphore = NULL;
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
+               if (list_empty(&bo->free))
+                       continue;
+               *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
+               (*semaphore)->cpu_ptr[0] = 0;
+               (*semaphore)->cpu_ptr[1] = 0;
+               list_del(&(*semaphore)->list);
+               bo->nused++;
+               break;
+       }
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+
+       if (*semaphore == NULL) {
+               if (do_retry) {
+                       do_retry = false;
+                       r = radeon_semaphore_add_bo(rdev);
+                       if (r)
+                               return r;
+                       goto retry;
+               }
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+                                 struct radeon_semaphore *semaphore)
+{
+       radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+                               struct radeon_semaphore *semaphore)
+{
+       radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+                          struct radeon_semaphore *semaphore)
+{
+       unsigned long irq_flags;
+
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       semaphore->bo->nused--;
+       list_add_tail(&semaphore->list, &semaphore->bo->free);
+       radeon_semaphore_shrink_locked(rdev);
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo, *n;
+       unsigned long irq_flags;
+
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       /* we force to free everything */
+       list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
+               if (!list_empty(&bo->free)) {
+                       dev_err(rdev->dev, "still in use semaphore\n");
+               }
+               radeon_semaphore_del_bo_locked(rdev, bo);
+       }
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
index 602fa3541c454f8ac2c058cb5832520fad95612f..dc5dcf483aa3c0217c85a2db318ac07542754dde 100644 (file)
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+       n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               n -= rdev->ring[i].ring_size;
        if (rdev->wb.wb_obj)
                n -= RADEON_GPU_PAGE_SIZE;
        if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(gtt_obj[i]);
 
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(vram_obj);
 
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
 }
+
+/*
+ * radeon_test_ring_sync - verify semaphore sync between two rings
+ * @rdev:  radeon device
+ * @ringA: ring that blocks on the semaphore (carries both fences)
+ * @ringB: ring that signals the semaphore
+ *
+ * Emits two fences on ringA, each behind a semaphore wait, then
+ * releases them one at a time by signalling from ringB.  A fence that
+ * signals before its corresponding release, or fails to signal after
+ * it, indicates broken semaphore synchronization.
+ */
+void radeon_test_ring_sync(struct radeon_device *rdev,
+                          struct radeon_ring *ringA,
+                          struct radeon_ring *ringB)
+{
+       struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+       struct radeon_semaphore *semaphore = NULL;
+       int ridxA = radeon_ring_index(rdev, ringA);
+       int ridxB = radeon_ring_index(rdev, ringB);
+       int r;
+
+       r = radeon_fence_create(rdev, &fence1, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 1\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_create(rdev, &fence2, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 2\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_semaphore_create(rdev, &semaphore);
+       if (r) {
+               DRM_ERROR("Failed to create semaphore\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringA, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fence1);
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fence2);
+       radeon_ring_unlock_commit(rdev, ringA);
+
+       mdelay(1000);
+
+       /* nothing has been signalled yet; fence1 must still be pending */
+       if (radeon_fence_signaled(fence1)) {
+               DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               /* print the ring index, consistent with the ring A message */
+               DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       r = radeon_fence_wait(fence1, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence 1\n");
+               goto out_cleanup;
+       }
+
+       mdelay(1000);
+
+       /* first signal released only fence1; fence2 must still be pending */
+       if (radeon_fence_signaled(fence2)) {
+               DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       r = radeon_fence_wait(fence2, false);
+       if (r) {
+               /* was "fence 1" (copy-paste); this path waits on fence 2 */
+               DRM_ERROR("Failed to wait for sync fence 2\n");
+               goto out_cleanup;
+       }
+
+out_cleanup:
+       if (semaphore)
+               radeon_semaphore_free(rdev, semaphore);
+
+       if (fence1)
+               radeon_fence_unref(&fence1);
+
+       if (fence2)
+               radeon_fence_unref(&fence2);
+
+       if (r)
+               printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+/*
+ * radeon_test_ring_sync2 - verify one semaphore signal wakes one waiter
+ * @rdev:  radeon device
+ * @ringA: first waiting ring (fenceA)
+ * @ringB: second waiting ring (fenceB)
+ * @ringC: ring that emits the semaphore signals
+ *
+ * Both ringA and ringB block on the same semaphore; ringC then signals
+ * twice.  After the first signal exactly one of the two fences must be
+ * signalled, after the second both must complete.
+ */
+void radeon_test_ring_sync2(struct radeon_device *rdev,
+                           struct radeon_ring *ringA,
+                           struct radeon_ring *ringB,
+                           struct radeon_ring *ringC)
+{
+       struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+       struct radeon_semaphore *semaphore = NULL;
+       int ridxA = radeon_ring_index(rdev, ringA);
+       int ridxB = radeon_ring_index(rdev, ringB);
+       int ridxC = radeon_ring_index(rdev, ringC);
+       bool sigA = false, sigB = false;
+       int i, r;
+
+       r = radeon_fence_create(rdev, &fenceA, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 1\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_create(rdev, &fenceB, ridxB);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 2\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_semaphore_create(rdev, &semaphore);
+       if (r) {
+               DRM_ERROR("Failed to create semaphore\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringA, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fenceA);
+       radeon_ring_unlock_commit(rdev, ringA);
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
+       radeon_fence_emit(rdev, fenceB);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       mdelay(1000);
+
+       /* no signal has been emitted yet; neither fence may be done */
+       if (radeon_fence_signaled(fenceA)) {
+               DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+       if (radeon_fence_signaled(fenceB)) {
+               /* was "Fence A" (copy-paste); this check is for fence B */
+               DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringC, 64);
+       if (r) {
+               /* was "ring B %p" (copy-paste); this locks ring C */
+               DRM_ERROR("Failed to lock ring C %d\n", ridxC);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+       radeon_ring_unlock_commit(rdev, ringC);
+
+       /* poll up to 3s for the single signal to release one waiter */
+       for (i = 0; i < 30; ++i) {
+               mdelay(100);
+               sigA = radeon_fence_signaled(fenceA);
+               sigB = radeon_fence_signaled(fenceB);
+               if (sigA || sigB)
+                       break;
+       }
+
+       if (!sigA && !sigB) {
+               DRM_ERROR("Neither fence A nor B has been signaled\n");
+               goto out_cleanup;
+       } else if (sigA && sigB) {
+               DRM_ERROR("Both fence A and B has been signaled\n");
+               goto out_cleanup;
+       }
+
+       DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+       r = radeon_ring_lock(rdev, ringC, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring C %d\n", ridxC);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+       radeon_ring_unlock_commit(rdev, ringC);
+
+       mdelay(1000);
+
+       r = radeon_fence_wait(fenceA, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence A\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_wait(fenceB, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence B\n");
+               goto out_cleanup;
+       }
+
+out_cleanup:
+       if (semaphore)
+               radeon_semaphore_free(rdev, semaphore);
+
+       if (fenceA)
+               radeon_fence_unref(&fenceA);
+
+       if (fenceB)
+               radeon_fence_unref(&fenceB);
+
+       if (r)
+               printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+       int i, j, k;
+
+       for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+               struct radeon_ring *ringA = &rdev->ring[i];
+               if (!ringA->ready)
+                       continue;
+
+               for (j = 0; j < i; ++j) {
+                       struct radeon_ring *ringB = &rdev->ring[j];
+                       if (!ringB->ready)
+                               continue;
+
+                       DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+                       radeon_test_ring_sync(rdev, ringA, ringB);
+
+                       DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+                       radeon_test_ring_sync(rdev, ringB, ringA);
+
+                       for (k = 0; k < j; ++k) {
+                               struct radeon_ring *ringC = &rdev->ring[k];
+                               if (!ringC->ready)
+                                       continue;
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+                               radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+                               radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+                               radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+                               radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+                               radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+                               radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+                       }
+               }
+       }
+}
index 0b5468bfaf54484531062bec756aa874e844bb3a..c421e77ace71e45cfd9a18e487bd2947e5285313 100644 (file)
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
        }
 }
 
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
-       struct radeon_device *rdev;
-
-       rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
-       if (rdev->flags & RADEON_IS_AGP) {
-               return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
-       } else
-#endif
-       {
-               return radeon_ttm_backend_create(rdev);
-       }
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
        return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->rdev->cp.ready == false)
+               if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
-       int r;
+       int r, i;
 
        rdev = radeon_get_rdev(bo->bdev);
-       r = radeon_fence_create(rdev, &fence);
+       r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
        if (unlikely(r)) {
                return r;
        }
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
-       if (!rdev->cp.ready) {
-               DRM_ERROR("Trying to move memory with CP turned off.\n");
+       if (!rdev->ring[rdev->copy_ring].ready) {
+               DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
+       /* sync other rings */
+       if (rdev->family >= CHIP_R600) {
+               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                       /* no need to sync to our own or unused rings */
+                       if (i == rdev->copy_ring || !rdev->ring[i].ready)
+                               continue;
+
+                       if (!fence->semaphore) {
+                               r = radeon_semaphore_create(rdev, &fence->semaphore);
+                               /* FIXME: handle semaphore error */
+                               if (r)
+                                       continue;
+                       }
+
+                       r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+                       /* FIXME: handle ring lock error */
+                       if (r)
+                               continue;
+                       radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+                       radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+                       r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+                       /* FIXME: handle ring lock error */
+                       if (r)
+                               continue;
+                       radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+                       radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+               }
+       }
+
        r = radeon_copy(rdev, old_start, new_start,
                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
                        fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
                radeon_move_null(bo, new_mem);
                return 0;
        }
-       if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+       if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
 
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+       struct ttm_dma_tt               ttm;
+       struct radeon_device            *rdev;
+       u64                             offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+                                  struct ttm_mem_reg *bo_mem)
+{
+       struct radeon_ttm_tt *gtt = (void*)ttm;
+       int r;
+
+       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       if (!ttm->num_pages) {
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    ttm->num_pages, bo_mem, ttm);
+       }
+       r = radeon_gart_bind(gtt->rdev, gtt->offset,
+                            ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+       if (r) {
+               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+                         ttm->num_pages, (unsigned)gtt->offset);
+               return r;
+       }
+       return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+       return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       ttm_dma_tt_fini(&gtt->ttm);
+       kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+       .bind = &radeon_ttm_backend_bind,
+       .unbind = &radeon_ttm_backend_unbind,
+       .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+                                   unsigned long size, uint32_t page_flags,
+                                   struct page *dummy_read_page)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt;
+
+       rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+                                        size, page_flags, dummy_read_page);
+       }
+#endif
+
+       gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+       if (gtt == NULL) {
+               return NULL;
+       }
+       gtt->ttm.ttm.func = &radeon_backend_func;
+       gtt->rdev = rdev;
+       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(gtt);
+               return NULL;
+       }
+       return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       unsigned i;
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               return ttm_agp_tt_populate(ttm);
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               return ttm_dma_populate(&gtt->ttm, rdev->dev);
+       }
+#endif
+
+       r = ttm_pool_populate(ttm);
+       if (r) {
+               return r;
+       }
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+                                                      0, PAGE_SIZE,
+                                                      PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+                       while (--i) {
+                               pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                               gtt->ttm.dma_address[i] = 0;
+                       }
+                       ttm_pool_unpopulate(ttm);
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       unsigned i;
+
+       rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               ttm_agp_tt_unpopulate(ttm);
+               return;
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+               return;
+       }
+#endif
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               if (gtt->ttm.dma_address[i]) {
+                       pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+
+       ttm_pool_unpopulate(ttm);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
-       .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+       .ttm_tt_create = &radeon_ttm_tt_create,
+       .ttm_tt_populate = &radeon_ttm_tt_populate,
+       .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
-       struct ttm_backend              backend;
-       struct radeon_device            *rdev;
-       unsigned long                   num_pages;
-       struct page                     **pages;
-       struct page                     *dummy_read_page;
-       dma_addr_t                      *dma_addrs;
-       bool                            populated;
-       bool                            bound;
-       unsigned                        offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
-                                      unsigned long num_pages,
-                                      struct page **pages,
-                                      struct page *dummy_read_page,
-                                      dma_addr_t *dma_addrs)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->pages = pages;
-       gtt->dma_addrs = dma_addrs;
-       gtt->num_pages = num_pages;
-       gtt->dummy_read_page = dummy_read_page;
-       gtt->populated = true;
-       return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->pages = NULL;
-       gtt->dma_addrs = NULL;
-       gtt->num_pages = 0;
-       gtt->dummy_read_page = NULL;
-       gtt->populated = false;
-       gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
-                                  struct ttm_mem_reg *bo_mem)
-{
-       struct radeon_ttm_backend *gtt;
-       int r;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->offset = bo_mem->start << PAGE_SHIFT;
-       if (!gtt->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-                    gtt->num_pages, bo_mem, backend);
-       }
-       r = radeon_gart_bind(gtt->rdev, gtt->offset,
-                            gtt->num_pages, gtt->pages, gtt->dma_addrs);
-       if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-                         gtt->num_pages, gtt->offset);
-               return r;
-       }
-       gtt->bound = true;
-       return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
-       gtt->bound = false;
-       return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       if (gtt->bound) {
-               radeon_ttm_backend_unbind(backend);
-       }
-       kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
-       .populate = &radeon_ttm_backend_populate,
-       .clear = &radeon_ttm_backend_clear,
-       .bind = &radeon_ttm_backend_bind,
-       .unbind = &radeon_ttm_backend_unbind,
-       .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
-       if (gtt == NULL) {
-               return NULL;
-       }
-       gtt->backend.bdev = &rdev->mman.bdev;
-       gtt->backend.flags = 0;
-       gtt->backend.func = &radeon_backend_func;
-       gtt->rdev = rdev;
-       gtt->pages = NULL;
-       gtt->num_pages = 0;
-       gtt->dummy_read_page = NULL;
-       gtt->populated = false;
-       gtt->bound = false;
-       return &gtt->backend;
-}
-
 #define RADEON_DEBUGFS_MEM_TYPES 2
 
 #if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
-       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
        unsigned i;
 
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
        radeon_mem_types_list[i].name = radeon_mem_types_names[i];
        radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
        radeon_mem_types_list[i].driver_features = 0;
-       radeon_mem_types_list[i].data = NULL;
-       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+       radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+               radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+               radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+               radeon_mem_types_list[i].driver_features = 0;
+               radeon_mem_types_list[i++].data = NULL;
+       }
+#endif
+       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
 #endif
        return 0;
index 06b90c87f8f397012c82d631fa30a5bd57c5da5a..b0ce84a20a68faf628ea3b72171a41b38e78dfaf 100644 (file)
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -447,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev)
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs400_startup(rdev);
 }
 
 int rs400_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -530,7 +546,14 @@ int rs400_init(struct radeon_device *rdev)
        if (r)
                return r;
        r300_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs400_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index b1053d64042313df931b9cde933e121bfa814517..803e0d3c177385ad31c4861c2647b28f721562d8 100644 (file)
@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 
 void rs600_bm_disable(struct radeon_device *rdev)
 {
-       u32 tmp;
+       u16 tmp;
 
        /* disable bus mastering */
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp);
        pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
        mdelay(1);
 }
@@ -549,7 +549,7 @@ int rs600_irq_set(struct radeon_device *rdev)
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                tmp |= S_000040_SW_INT_EN(1);
        }
        if (rdev->irq.gui_idle) {
@@ -642,7 +642,7 @@ int rs600_irq_process(struct radeon_device *rdev)
        while (status || rdev->irq.stat_regs.r500.disp_int) {
                /* SW interrupt */
                if (G_000044_SW_INT(status)) {
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                }
                /* GUI idle */
                if (G_000040_GUI_IDLE(status)) {
@@ -849,6 +849,12 @@ static int rs600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -858,15 +864,21 @@ static int rs600_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = r600_audio_init(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed initializing audio\n");
                return r;
        }
 
-       r = r600_audio_init(rdev);
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
 
@@ -891,11 +903,14 @@ int rs600_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs600_startup(rdev);
 }
 
 int rs600_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -976,7 +991,14 @@ int rs600_init(struct radeon_device *rdev)
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs600_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index a9049ed1a519db77daa9360127e758719a8919c8..4f24a0fa8c82f1c4b9ac790073be0a3e69fb00eb 100644 (file)
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = r600_audio_init(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed initializing audio\n");
                return r;
        }
 
-       r = r600_audio_init(rdev);
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
 
@@ -663,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs690_startup(rdev);
 }
 
 int rs690_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -749,7 +764,14 @@ int rs690_init(struct radeon_device *rdev)
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs690_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index 6613ee9ecca331dbcf9db9295667ca70c0817082..880637fd1946c5b7f717487fca8dad37eddb0b42 100644 (file)
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
 
 void rv515_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          ISYNC_ANY2D_IDLE3D |
                          ISYNC_ANY3D_IDLE2D |
                          ISYNC_WAIT_IDLEGUI |
                          ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
-       radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
-       radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
-       radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
-       radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+       radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+       radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+       radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+       radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+       radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+       radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+       radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+       radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+       radeon_ring_write(ring,
                          ((6 << MS_X0_SHIFT) |
                           (6 << MS_Y0_SHIFT) |
                           (6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
                           (6 << MS_Y2_SHIFT) |
                           (6 << MSBD0_Y_SHIFT) |
                           (6 << MSBD0_X_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+       radeon_ring_write(ring,
                          ((6 << MS_X3_SHIFT) |
                           (6 << MS_Y3_SHIFT) |
                           (6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
                           (6 << MS_X5_SHIFT) |
                           (6 << MS_Y5_SHIFT) |
                           (6 << MSBD1_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
-       radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
-       radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
-       radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
-       radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
-       radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
-       radeon_ring_write(rdev, PACKET0(0x20C8, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+       radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+       radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+       radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+       radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+       radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+       radeon_ring_write(ring, PACKET0(0x20C8, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
        return 0;
@@ -428,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rv515_startup(rdev);
 }
 
@@ -524,7 +539,14 @@ int rv515_init(struct radeon_device *rdev)
        if (r)
                return r;
        rv515_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rv515_startup(rdev);
        if (r) {
                /* Somethings want wront with the accel init stop accel */
index 23ae1c60ab3d97b8a1576c567054b736fd70eb93..a1668b659ddd3e3f8ee94c97de6474468c71d3dd 100644 (file)
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
        r700_cp_stop(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 /*
@@ -1043,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -1082,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1091,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = rv770_cp_load_microcode(rdev);
@@ -1101,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "IB test failed (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
        return 0;
 }
 
@@ -1115,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        r = r600_audio_init(rdev);
        if (r) {
                dev_err(rdev->dev, "radeon: audio init failed\n");
@@ -1140,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -1215,8 +1231,8 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -1225,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rv770_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-                       rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
-               }
-       }
 
        r = r600_audio_init(rdev);
        if (r) {
@@ -1265,11 +1275,12 @@ void rv770_fini(struct radeon_device *rdev)
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
index 5468d1cd3296102560aa1653173233c637494bad..89afe0b83643282ced1abf4ed62660c510008423 100644 (file)
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
        savage_PCI_IDS
 };
 
+static const struct file_operations savage_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = savage_reclaim_buffers,
        .ioctls = savage_ioctls,
        .dma_ioctl = savage_bci_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &savage_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index a9c5716bea4eb1f3a155c2d1d15cda3480c0f51d..06da063ece2e59681f3c51110006e13e7e58ae0f 100644 (file)
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev->dev_private = (void *)dev_priv;
        dev_priv->chipset = chipset;
-       ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-       if (ret)
-               kfree(dev_priv);
+       idr_init(&dev->object_name_idr);
 
        return ret;
 }
@@ -59,32 +57,60 @@ static int sis_driver_unload(struct drm_device *dev)
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
 
-       drm_sman_takedown(&dev_priv->sman);
+       idr_remove_all(&dev_priv->object_idr);
+       idr_destroy(&dev_priv->object_idr);
+
        kfree(dev_priv);
 
        return 0;
 }
 
+static const struct file_operations sis_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct sis_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+       file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+
+       INIT_LIST_HEAD(&file_priv->obj_list);
+
+       return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct sis_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
+}
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
+       .open = sis_driver_open,
+       .postclose = sis_driver_postclose,
        .dma_quiescent = sis_idle,
        .reclaim_buffers = NULL,
        .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
        .lastclose = sis_lastclose,
        .ioctls = sis_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &sis_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 194303c177ad65375af2801200e219f69894d3c6..573758b2d2d66caa1118ab4cb00b2ecc77c1fb52 100644 (file)
@@ -44,7 +44,7 @@ enum sis_family {
        SIS_CHIP_315 = 1,
 };
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 
 
 #define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
 typedef struct drm_sis_private {
        drm_local_map_t *mmio;
        unsigned int idle_fault;
-       struct drm_sman sman;
        unsigned int chipset;
        int vram_initialized;
        int agp_initialized;
        unsigned long vram_offset;
        unsigned long agp_offset;
+       struct drm_mm vram_mm;
+       struct drm_mm agp_mm;
+       /** Mapping of userspace keys to mm objects */
+       struct idr object_idr;
 } drm_sis_private_t;
 
 extern int sis_idle(struct drm_device *dev);
index 7fe2b63412ce96aa2c0829127c25261a7bb7aa6d..dd4a316c3d746b974c462294817b0321c1629af2 100644 (file)
 #define AGP_TYPE 1
 
 
+struct sis_memblock {
+       struct drm_mm_node mm_node;
+       struct sis_memreq req;
+       struct list_head owner_list;
+};
+
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
 #define SIS_MM_ALIGN_MASK 0
 
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
-                                 unsigned alignment)
-{
-       struct sis_memreq req;
-
-       req.size = size;
-       sis_malloc(&req);
-       if (req.size == 0)
-               return NULL;
-       else
-               return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
-       sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
-       ;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
-       return ~((unsigned long)ref);
-}
-
 #else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_fb_t *fb = data;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
-       {
-               struct drm_sman_mm sman_mm;
-               sman_mm.private = (void *)0xFFFFFFFF;
-               sman_mm.allocate = sis_sman_mm_allocate;
-               sman_mm.free = sis_sman_mm_free;
-               sman_mm.destroy = sis_sman_mm_destroy;
-               sman_mm.offset = sis_sman_mm_offset;
-               ret =
-                   drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
-       }
-#else
-       ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
-                                fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
-       if (ret) {
-               DRM_ERROR("VRAM memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       /* Unconditionally init the drm_mm, even though we don't use it when the
+        * fb sis driver is available - make cleanup easier. */
+       drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
 
        dev_priv->vram_initialized = 1;
        dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
        return 0;
 }
 
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
                         void *data, int pool)
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_mem_t *mem = data;
-       int retval = 0;
-       struct drm_memblock_item *item;
+       int retval = 0, user_key;
+       struct sis_memblock *item;
+       struct sis_file_private *file_priv = file->driver_priv;
+       unsigned long offset;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
                return -EINVAL;
        }
 
-       mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
-       item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
-                             (unsigned long)file_priv);
+       item = kzalloc(sizeof(*item), GFP_KERNEL);
+       if (!item) {
+               retval = -ENOMEM;
+               goto fail_alloc;
+       }
 
-       mutex_unlock(&dev->struct_mutex);
-       if (item) {
-               mem->offset = ((pool == 0) ?
-                             dev_priv->vram_offset : dev_priv->agp_offset) +
-                   (item->mm->
-                    offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
-               mem->free = item->user_hash.key;
-               mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+       mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+       if (pool == AGP_TYPE) {
+               retval = drm_mm_insert_node(&dev_priv->agp_mm,
+                                           &item->mm_node,
+                                           mem->size, 0);
+               offset = item->mm_node.start;
        } else {
-               mem->offset = 0;
-               mem->size = 0;
-               mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+               item->req.size = mem->size;
+               sis_malloc(&item->req);
+               if (item->req.size == 0)
+                       retval = -ENOMEM;
+               offset = item->req.offset;
+#else
+               retval = drm_mm_insert_node(&dev_priv->vram_mm,
+                                           &item->mm_node,
+                                           mem->size, 0);
+               offset = item->mm_node.start;
+#endif
+       }
+       if (retval)
+               goto fail_alloc;
+
+again:
+       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
                retval = -ENOMEM;
+               goto fail_idr;
        }
 
+       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+       if (retval == -EAGAIN)
+               goto again;
+       if (retval)
+               goto fail_idr;
+
+       list_add(&item->owner_list, &file_priv->obj_list);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = ((pool == 0) ?
+                     dev_priv->vram_offset : dev_priv->agp_offset) +
+           (offset << SIS_MM_ALIGN_SHIFT);
+       mem->free = user_key;
+       mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+       return 0;
+
+fail_idr:
+       drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+       kfree(item);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = 0;
+       mem->size = 0;
+       mem->free = 0;
+
        DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
                  mem->offset);
 
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_mem_t *mem = data;
-       int ret;
+       struct sis_memblock *obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+       obj = idr_find(&dev_priv->object_idr, mem->free);
+       if (obj == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       idr_remove(&dev_priv->object_idr, mem->free);
+       list_del(&obj->owner_list);
+       if (drm_mm_node_allocated(&obj->mm_node))
+               drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+       else
+               sis_free(obj->req.offset);
+#endif
+       kfree(obj);
        mutex_unlock(&dev->struct_mutex);
        DRM_DEBUG("free = 0x%lx\n", mem->free);
 
-       return ret;
+       return 0;
 }
 
 static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_agp_t *agp = data;
-       int ret;
        dev_priv = dev->dev_private;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
-                                agp->size >> SIS_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("AGP memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
 
        dev_priv->agp_initialized = 1;
        dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
                return;
 
        mutex_lock(&dev->struct_mutex);
-       drm_sman_cleanup(&dev_priv->sman);
-       dev_priv->vram_initialized = 0;
-       dev_priv->agp_initialized = 0;
+       if (dev_priv->vram_initialized) {
+               drm_mm_takedown(&dev_priv->vram_mm);
+               dev_priv->vram_initialized = 0;
+       }
+       if (dev_priv->agp_initialized) {
+               drm_mm_takedown(&dev_priv->agp_mm);
+               dev_priv->agp_initialized = 0;
+       }
        dev_priv->mmio = NULL;
        mutex_unlock(&dev->struct_mutex);
 }
 
 void sis_reclaim_buffers_locked(struct drm_device *dev,
-                               struct drm_file *file_priv)
+                               struct drm_file *file)
 {
-       drm_sis_private_t *dev_priv = dev->dev_private;
+       struct sis_file_private *file_priv = file->driver_priv;
+       struct sis_memblock *entry, *next;
 
        mutex_lock(&dev->struct_mutex);
-       if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+       if (list_empty(&file_priv->obj_list)) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
        if (dev->driver->dma_quiescent)
                dev->driver->dma_quiescent(dev);
 
-       drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+       list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+                                owner_list) {
+               list_del(&entry->owner_list);
+               if (drm_mm_node_allocated(&entry->mm_node))
+                       drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+               else
+                       sis_free(entry->req.offset);
+#endif
+               kfree(entry);
+       }
        mutex_unlock(&dev->struct_mutex);
        return;
 }
index cda29911e332edb7d4dc5ed1d024c597fb6cb2bc..1613c78544c0e8b9bf4aa63501f0dece2d69d003 100644 (file)
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
        tdfx_PCI_IDS
 };
 
+static const struct file_operations tdfx_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_USE_MTRR,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &tdfx_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index f3cf6f02c9970da645280df0f12622a12a3e0d7e..b2b33dde2afb06a1c78ffcbeb2e826a28ec0bb58 100644 (file)
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
        ttm_bo_manager.o
 
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
 obj-$(CONFIG_DRM_TTM) += ttm.o
index 1c4a72f681c16c89c9ebf0378330910654fa713d..747c1413fc955b2bd56a583a0a19fcf07ae9eac6 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
 #ifdef TTM_HAS_AGP
 #include "ttm/ttm_placement.h"
 #include <linux/agp_backend.h>
 #include <asm/agp.h>
 
 struct ttm_agp_backend {
-       struct ttm_backend backend;
+       struct ttm_tt ttm;
        struct agp_memory *mem;
        struct agp_bridge_data *bridge;
 };
 
-static int ttm_agp_populate(struct ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages,
-                           struct page *dummy_read_page,
-                           dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct page **cur_page, **last_page = pages + num_pages;
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+       struct drm_mm_node *node = bo_mem->mm_node;
        struct agp_memory *mem;
+       int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+       unsigned i;
 
-       mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+       mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
        if (unlikely(mem == NULL))
                return -ENOMEM;
 
        mem->page_count = 0;
-       for (cur_page = pages; cur_page < last_page; ++cur_page) {
-               struct page *page = *cur_page;
+       for (i = 0; i < ttm->num_pages; i++) {
+               struct page *page = ttm->pages[i];
+
                if (!page)
-                       page = dummy_read_page;
+                       page = ttm->dummy_read_page;
 
                mem->pages[mem->page_count++] = page;
        }
        agp_be->mem = mem;
-       return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct drm_mm_node *node = bo_mem->mm_node;
-       struct agp_memory *mem = agp_be->mem;
-       int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
-       int ret;
 
        mem->is_flushed = 1;
        mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
        return ret;
 }
 
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
-       if (agp_be->mem->is_bound)
-               return agp_unbind_memory(agp_be->mem);
-       else
-               return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct agp_memory *mem = agp_be->mem;
-
-       if (mem) {
-               ttm_agp_unbind(backend);
-               agp_free_memory(mem);
+       if (agp_be->mem) {
+               if (agp_be->mem->is_bound)
+                       return agp_unbind_memory(agp_be->mem);
+               agp_free_memory(agp_be->mem);
+               agp_be->mem = NULL;
        }
-       agp_be->mem = NULL;
+       return 0;
 }
 
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
        if (agp_be->mem)
-               ttm_agp_clear(backend);
+               ttm_agp_unbind(ttm);
+       ttm_tt_fini(ttm);
        kfree(agp_be);
 }
 
 static struct ttm_backend_func ttm_agp_func = {
-       .populate = ttm_agp_populate,
-       .clear = ttm_agp_clear,
        .bind = ttm_agp_bind,
        .unbind = ttm_agp_unbind,
        .destroy = ttm_agp_destroy,
 };
 
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-                                        struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+                                struct agp_bridge_data *bridge,
+                                unsigned long size, uint32_t page_flags,
+                                struct page *dummy_read_page)
 {
        struct ttm_agp_backend *agp_be;
 
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
 
        agp_be->mem = NULL;
        agp_be->bridge = bridge;
-       agp_be->backend.func = &ttm_agp_func;
-       agp_be->backend.bdev = bdev;
-       return &agp_be->backend;
+       agp_be->ttm.func = &ttm_agp_func;
+
+       if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+               return NULL;
+       }
+
+       return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+       ttm_pool_unpopulate(ttm);
 }
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
 
 #endif
index 0bb0f5f713e6cf9166a1b91eb19ab81b85206716..2f0eab66ece6c1eb9eeffaae72a517727a0257ee 100644 (file)
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;
+       size_t acc_size = bo->acc_size;
 
        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
        if (bo->destroy)
                bo->destroy(bo);
        else {
-               ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
+       ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
        case ttm_bo_type_kernel:
-               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                       page_flags, glob->dummy_read_page);
+               bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+                                                     page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
-       case ttm_bo_type_user:
-               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                       page_flags | TTM_PAGE_FLAG_USER,
-                                       glob->dummy_read_page);
-               if (unlikely(bo->ttm == NULL)) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               ret = ttm_tt_set_user(bo->ttm, current,
-                                     bo->buffer_start, bo->num_pages);
-               if (unlikely(ret != 0)) {
-                       ttm_tt_destroy(bo->ttm);
-                       bo->ttm = NULL;
-               }
-               break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                }
        }
 
-       if (bdev->driver->move_notify)
-               bdev->driver->move_notify(bo, mem);
-
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        if (ret)
                goto out_err;
 
+       if (bdev->driver->move_notify)
+               bdev->driver->move_notify(bo, mem);
+
 moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ out_err:
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
+       if (bo->bdev->driver->move_notify)
+               bo->bdev->driver->move_notify(bo, NULL);
+
        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
@@ -913,16 +901,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 }
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
-                                bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
 {
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
-               return false;
-
        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;
 
@@ -967,7 +951,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                man = &bdev->man[mem_type];
 
                type_ok = ttm_bo_mt_compatible(man,
-                                               bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);
@@ -1015,7 +998,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
-                                               bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
@@ -1185,6 +1167,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 {
        int ret = 0;
        unsigned long num_pages;
+       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+       if (ret) {
+               printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+               if (destroy)
+                       (*destroy)(bo);
+               else
+                       kfree(bo);
+               return -ENOMEM;
+       }
 
        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1248,34 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
-                                unsigned long num_pages)
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+                      unsigned long bo_size,
+                      unsigned struct_size)
 {
-       size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
-           PAGE_MASK;
+       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+       size_t size = 0;
 
-       return glob->ttm_bo_size + 2 * page_array_size;
+       size += ttm_round_pot(struct_size);
+       size += PAGE_ALIGN(npages * sizeof(void *));
+       size += ttm_round_pot(sizeof(struct ttm_tt));
+       return size;
 }
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+                          unsigned long bo_size,
+                          unsigned struct_size)
+{
+       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+       size_t size = 0;
+
+       size += ttm_round_pot(struct_size);
+       size += PAGE_ALIGN(npages * sizeof(void *));
+       size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+       size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+       return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
 int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
@@ -1276,10 +1289,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 {
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+       size_t acc_size;
        int ret;
 
-       size_t acc_size =
-           ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;
@@ -1465,13 +1478,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
                goto out_no_shrink;
        }
 
-       glob->ttm_bo_extra_size =
-               ttm_round_pot(sizeof(struct ttm_tt)) +
-               ttm_round_pot(sizeof(struct ttm_backend));
-
-       glob->ttm_bo_size = glob->ttm_bo_extra_size +
-               ttm_round_pot(sizeof(struct ttm_buffer_object));
-
        atomic_set(&glob->bo_count, 0);
 
        ret = kobject_init_and_add(
index 082fcaea583fb08303d1baab9b68125a1cf31079..f8187ead7b373336c21e603caa448bad99d50b47 100644 (file)
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
 {
-       struct page *d = ttm_tt_get_page(ttm, page);
+       struct page *d = ttm->pages[page];
        void *dst;
 
        if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
 {
-       struct page *s = ttm_tt_get_page(ttm, page);
+       struct page *s = ttm->pages[page];
        void *src;
 
        if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        if (old_iomap == NULL && ttm == NULL)
                goto out2;
 
+       if (ttm->state == tt_unpopulated) {
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               if (ret)
+                       goto out1;
+       }
+
        add = 0;
        dir = 1;
 
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
+       fbo->acc_size = 0;
 
        *new_obj = fbo;
        return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
        struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
-       struct page *d;
-       int i;
+       int ret;
 
        BUG_ON(!ttm);
+
+       if (ttm->state == tt_unpopulated) {
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               if (ret)
+                       return ret;
+       }
+
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                 */
 
                map->bo_kmap_type = ttm_bo_map_kmap;
-               map->page = ttm_tt_get_page(ttm, start_page);
+               map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
-           /*
-            * Populate the part we're mapping;
-            */
-               for (i = start_page; i < start_page + num_pages; ++i) {
-                       d = ttm_tt_get_page(ttm, i);
-                       if (!d)
-                               return -ENOMEM;
-               }
-
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
index 221b924acebe273fb8b19080cd5a90b026256e73..54412848de88e5d7506872750c8187e10841b99e 100644 (file)
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+               /* Allocate all page at once, most common usage */
+               if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+                       retval = VM_FAULT_OOM;
+                       goto out_io_unlock;
+               }
        }
 
        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
-
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
-                       page = ttm_tt_get_page(ttm, page_offset);
+                       page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
index e70ddd82dc022e38c42b10bb4bce1b60ff511f04..9eba8e9a4e9c6f6c35bb60da9da9d4cf1a6fc9fd 100644 (file)
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
                       zone->name, (unsigned long long) zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+       ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
 out_no_zone:
        ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
+       ttm_dma_page_alloc_fini();
 
        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
index 727e93daac3b04ba387ecd5093a468e4889654e1..499debda791e9534f05b397e5a400ef69c97a293 100644 (file)
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
  * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-               struct list_head *pages, int ttm_flags,
-               enum ttm_caching_state cstate, unsigned count)
+                                       struct list_head *pages,
+                                       int ttm_flags,
+                                       enum ttm_caching_state cstate,
+                                       unsigned count)
 {
        unsigned long irq_flags;
        struct list_head *p;
@@ -660,17 +662,67 @@ out:
        return count;
 }
 
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+                         enum ttm_caching_state cstate)
+{
+       unsigned long irq_flags;
+       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       unsigned i;
+
+       if (pool == NULL) {
+               /* No pool for this memory type so free the pages */
+               for (i = 0; i < npages; i++) {
+                       if (pages[i]) {
+                               if (page_count(pages[i]) != 1)
+                                       printk(KERN_ERR TTM_PFX
+                                              "Erroneous page count. "
+                                              "Leaking pages.\n");
+                               __free_page(pages[i]);
+                               pages[i] = NULL;
+                       }
+               }
+               return;
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       for (i = 0; i < npages; i++) {
+               if (pages[i]) {
+                       if (page_count(pages[i]) != 1)
+                               printk(KERN_ERR TTM_PFX
+                                      "Erroneous page count. "
+                                      "Leaking pages.\n");
+                       list_add_tail(&pages[i]->lru, &pool->list);
+                       pages[i] = NULL;
+                       pool->npages++;
+               }
+       }
+       /* Check that we don't go over the pool limit */
+       npages = 0;
+       if (pool->npages > _manager->options.max_size) {
+               npages = pool->npages - _manager->options.max_size;
+               /* free at least NUM_PAGES_TO_ALLOC number of pages
+                * to reduce calls to set_memory_wb */
+               if (npages < NUM_PAGES_TO_ALLOC)
+                       npages = NUM_PAGES_TO_ALLOC;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       if (npages)
+               ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct list_head *pages, int flags,
-                 enum ttm_caching_state cstate, unsigned count,
-                 dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+                        enum ttm_caching_state cstate)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       struct list_head plist;
        struct page *p = NULL;
        gfp_t gfp_flags = GFP_USER;
+       unsigned count;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
                else
                        gfp_flags |= GFP_HIGHUSER;
 
-               for (r = 0; r < count; ++r) {
+               for (r = 0; r < npages; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
 
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
                                return -ENOMEM;
                        }
 
-                       list_add(&p->lru, pages);
+                       pages[r] = p;
                }
                return 0;
        }
 
-
        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;
 
        /* First we take pages from the pool */
-       count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+       INIT_LIST_HEAD(&plist);
+       npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+       count = 0;
+       list_for_each_entry(p, &plist, lru) {
+               pages[count++] = p;
+       }
 
        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-               list_for_each_entry(p, pages, lru) {
+               list_for_each_entry(p, &plist, lru) {
                        clear_page(page_address(p));
                }
        }
 
        /* If pool didn't have enough pages allocate new one. */
-       if (count > 0) {
+       if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
-               r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+               INIT_LIST_HEAD(&plist);
+               r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+               list_for_each_entry(p, &plist, lru) {
+                       pages[count++] = p;
+               }
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
-                       ttm_put_pages(pages, 0, flags, cstate, NULL);
+                       ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }
 
-
        return 0;
 }
 
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-                  enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-       unsigned long irq_flags;
-       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-       struct page *p, *tmp;
-
-       if (pool == NULL) {
-               /* No pool for this memory type so free the pages */
-
-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       __free_page(p);
-               }
-               /* Make the pages list empty */
-               INIT_LIST_HEAD(pages);
-               return;
-       }
-       if (page_count == 0) {
-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       ++page_count;
-               }
-       }
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       list_splice_init(pages, &pool->list);
-       pool->npages += page_count;
-       /* Check that we don't go over the pool limit */
-       page_count = 0;
-       if (pool->npages > _manager->options.max_size) {
-               page_count = pool->npages - _manager->options.max_size;
-               /* free at least NUM_PAGES_TO_ALLOC number of pages
-                * to reduce calls to set_memory_wb */
-               if (page_count < NUM_PAGES_TO_ALLOC)
-                       page_count = NUM_PAGES_TO_ALLOC;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-       if (page_count)
-               ttm_page_pool_free(pool, page_count);
-}
-
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
 {
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
        _manager = NULL;
 }
 
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+       unsigned i;
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               ret = ttm_get_pages(&ttm->pages[i], 1,
+                                   ttm->page_flags,
+                                   ttm->caching_state);
+               if (ret != 0) {
+                       ttm_pool_unpopulate(ttm);
+                       return -ENOMEM;
+               }
+
+               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+                                               false, false);
+               if (unlikely(ret != 0)) {
+                       ttm_pool_unpopulate(ttm);
+                       return -ENOMEM;
+               }
+       }
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0)) {
+                       ttm_pool_unpopulate(ttm);
+                       return ret;
+               }
+       }
+
+       ttm->state = tt_unbound;
+       return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+       unsigned i;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               if (ttm->pages[i]) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                ttm->pages[i]);
+                       ttm_put_pages(&ttm->pages[i], 1,
+                                     ttm->page_flags,
+                                     ttm->caching_state);
+               }
+       }
+       ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
        struct ttm_page_pool *p;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644 (file)
index 0000000..37ead69
--- /dev/null
@@ -0,0 +1,1143 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ *   the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ *   when freed).
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION               4
+#define FREE_ALL_PAGES                 (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED                   (0)
+#define IS_WC                          (1<<1)
+#define IS_UC                          (1<<2)
+#define IS_CACHED                      (1<<3)
+#define IS_DMA32                       (1<<4)
+
+enum pool_type {
+       POOL_IS_UNDEFINED,
+       POOL_IS_WC = IS_WC,
+       POOL_IS_UC = IS_UC,
+       POOL_IS_CACHED = IS_CACHED,
+       POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+       POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+       POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ *  - generic (not restricted to DMA32):
+ *      - write combined, uncached, cached.
+ *  - dma32 (up to 2^32 - so up 4GB):
+ *      - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important and
+ *   it is in the order that the TTM pages that are put back are in.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ *   Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+       struct list_head pools; /* The 'struct device->dma_pools link */
+       enum pool_type type;
+       spinlock_t lock;
+       struct list_head inuse_list;
+       struct list_head free_list;
+       struct device *dev;
+       unsigned size;
+       unsigned npages_free;
+       unsigned npages_in_use;
+       unsigned long nfrees; /* Stats when shrunk. */
+       unsigned long nrefills; /* Stats when grown. */
+       gfp_t gfp_flags;
+       char name[13]; /* "cached dma32" */
+       char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ *   via the DMA API, it will be -1.
+ */
+struct dma_page {
+       struct list_head page_list;
+       void *vaddr;
+       struct page *p;
+       dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because only place where
+ * they may change is in sysfs store. They won't have immediate effect anyway
+ * so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+       unsigned        alloc_size;
+       unsigned        max_size;
+       unsigned        small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _mutex->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+       struct list_head pools;
+       struct device *dev;
+       struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+       struct mutex            lock;
+       struct list_head        pools;
+       struct ttm_pool_opts    options;
+       unsigned                npools;
+       struct shrinker         mm_shrink;
+       struct kobject          kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+       .name = "pool_max_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+       .name = "pool_small_allocation",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+       .name = "pool_allocation_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+       &ttm_page_pool_max,
+       &ttm_page_pool_small,
+       &ttm_page_pool_alloc_size,
+       NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+                             const char *buffer, size_t size)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       int chars;
+       unsigned val;
+       chars = sscanf(buffer, "%u", &val);
+       if (chars == 0)
+               return size;
+
+       /* Convert kb to number of pages */
+       val = val / (PAGE_SIZE >> 10);
+
+       if (attr == &ttm_page_pool_max)
+               m->options.max_size = val;
+       else if (attr == &ttm_page_pool_small)
+               m->options.small = val;
+       else if (attr == &ttm_page_pool_alloc_size) {
+               if (val > NUM_PAGES_TO_ALLOC*8) {
+                       printk(KERN_ERR TTM_PFX
+                              "Setting allocation size to %lu "
+                              "is not allowed. Recommended size is "
+                              "%lu\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       return size;
+               } else if (val > NUM_PAGES_TO_ALLOC) {
+                       printk(KERN_WARNING TTM_PFX
+                              "Setting allocation size to "
+                              "larger than %lu is not recommended.\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+               }
+               m->options.alloc_size = val;
+       }
+
+       return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+                            char *buffer)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       unsigned val = 0;
+
+       if (attr == &ttm_page_pool_max)
+               val = m->options.max_size;
+       else if (attr == &ttm_page_pool_small)
+               val = m->options.small;
+       else if (attr == &ttm_page_pool_alloc_size)
+               val = m->options.alloc_size;
+
+       val = val * (PAGE_SIZE >> 10);
+
+       return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+       .show = &ttm_pool_show,
+       .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+       .release = &ttm_pool_kobj_release,
+       .sysfs_ops = &ttm_pool_sysfs_ops,
+       .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               unmap_page_from_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+                                struct page **pages, unsigned cpages)
+{
+       int r = 0;
+       /* Set page caching */
+       if (pool->type & IS_UC) {
+               r = set_pages_array_uc(pages, cpages);
+               if (r)
+                       pr_err(TTM_PFX
+                              "%s: Failed to set %d pages to uc!\n",
+                              pool->dev_name, cpages);
+       }
+       if (pool->type & IS_WC) {
+               r = set_pages_array_wc(pages, cpages);
+               if (r)
+                       pr_err(TTM_PFX
+                              "%s: Failed to set %d pages to wc!\n",
+                              pool->dev_name, cpages);
+       }
+       return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+       dma_addr_t dma = d_page->dma;
+       dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+       kfree(d_page);
+       d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+       struct dma_page *d_page;
+
+       d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+       if (!d_page)
+               return NULL;
+
+       d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+                                          &d_page->dma,
+                                          pool->gfp_flags);
+       if (d_page->vaddr)
+               d_page->p = virt_to_page(d_page->vaddr);
+       else {
+               kfree(d_page);
+               d_page = NULL;
+       }
+       return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+       enum pool_type type = IS_UNDEFINED;
+
+       if (flags & TTM_PAGE_FLAG_DMA32)
+               type |= IS_DMA32;
+       if (cstate == tt_cached)
+               type |= IS_CACHED;
+       else if (cstate == tt_uncached)
+               type |= IS_UC;
+       else
+               type |= IS_WC;
+
+       return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+                                       unsigned freed_pages)
+{
+       pool->npages_free -= freed_pages;
+       pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+                             struct page *pages[], unsigned npages)
+{
+       struct dma_page *d_page, *tmp;
+
+       /* Don't set WB on WB page pool. */
+       if (npages && !(pool->type & IS_CACHED) &&
+           set_pages_array_wb(pages, npages))
+               pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+                       pool->dev_name, npages);
+
+       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+               list_del(&d_page->page_list);
+               __ttm_dma_free_page(pool, d_page);
+       }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+       /* Don't set WB on WB page pool. */
+       if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+               pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+                       pool->dev_name, 1);
+
+       list_del(&d_page->page_list);
+       __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: If set to FREE_ALL_PAGES will free all pages in pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+       unsigned long irq_flags;
+       struct dma_page *dma_p, *tmp;
+       struct page **pages_to_free;
+       struct list_head d_pages;
+       unsigned freed_pages = 0,
+                npages_to_free = nr_free;
+
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+       if (nr_free > 1) {
+               pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+                       pool->dev_name, pool->name, current->pid,
+                       npages_to_free, nr_free);
+       }
+#endif
+       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+                       GFP_KERNEL);
+
+       if (!pages_to_free) {
+               pr_err(TTM_PFX
+                      "%s: Failed to allocate memory for pool free operation.\n",
+                       pool->dev_name);
+               return 0;
+       }
+       INIT_LIST_HEAD(&d_pages);
+restart:
+       spin_lock_irqsave(&pool->lock, irq_flags);
+
+       /* We pick the oldest ones off the list */
+       list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+                                        page_list) {
+               if (freed_pages >= npages_to_free)
+                       break;
+
+               /* Move the dma_page from one list to another. */
+               list_move(&dma_p->page_list, &d_pages);
+
+               pages_to_free[freed_pages++] = dma_p->p;
+               /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+               if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+                       ttm_pool_update_free_locked(pool, freed_pages);
+                       /**
+                        * Because changing page caching is costly
+                        * we unlock the pool to prevent stalling.
+                        */
+                       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+                       ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+                                         freed_pages);
+
+                       INIT_LIST_HEAD(&d_pages);
+
+                       if (likely(nr_free != FREE_ALL_PAGES))
+                               nr_free -= freed_pages;
+
+                       if (NUM_PAGES_TO_ALLOC >= nr_free)
+                               npages_to_free = nr_free;
+                       else
+                               npages_to_free = NUM_PAGES_TO_ALLOC;
+
+                       freed_pages = 0;
+
+                       /* free all so restart the processing */
+                       if (nr_free)
+                               goto restart;
+
+                       /* Not allowed to fall through or break because
+                        * following context is inside spinlock while we are
+                        * outside here.
+                        */
+                       goto out;
+
+               }
+       }
+
+       /* remove range of pages from the pool */
+       if (freed_pages) {
+               ttm_pool_update_free_locked(pool, freed_pages);
+               nr_free -= freed_pages;
+       }
+
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+       if (freed_pages)
+               ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+       kfree(pages_to_free);
+       return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+       struct device_pools *p;
+       struct dma_pool *pool;
+
+       if (!dev)
+               return;
+
+       mutex_lock(&_manager->lock);
+       list_for_each_entry_reverse(p, &_manager->pools, pools) {
+               if (p->dev != dev)
+                       continue;
+               pool = p->pool;
+               if (pool->type != type)
+                       continue;
+
+               list_del(&p->pools);
+               kfree(p);
+               _manager->npools--;
+               break;
+       }
+       list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+               if (pool->type != type)
+                       continue;
+               /* Takes a spinlock.. */
+               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+               WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+               /* This code path is called after _all_ references to the
+                * struct device has been dropped - so nobody should be
+                * touching it. In case somebody is trying to _add_ we are
+                * guarded by the mutex. */
+               list_del(&pool->pools);
+               kfree(pool);
+               break;
+       }
+       mutex_unlock(&_manager->lock);
+}
+
+/*
+ * On free-ing of the 'struct device' this deconstructor is run.
+ * Albeit the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+       struct dma_pool *pool = *(struct dma_pool **)res;
+
+       if (pool)
+               ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+       return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+                                         enum pool_type type)
+{
+       char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+       enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+       struct device_pools *sec_pool = NULL;
+       struct dma_pool *pool = NULL, **ptr;
+       unsigned i;
+       int ret = -ENODEV;
+       char *p;
+
+       if (!dev)
+               return NULL;
+
+       ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       ret = -ENOMEM;
+
+       pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+                           dev_to_node(dev));
+       if (!pool)
+               goto err_mem;
+
+       sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+                               dev_to_node(dev));
+       if (!sec_pool)
+               goto err_mem;
+
+       INIT_LIST_HEAD(&sec_pool->pools);
+       sec_pool->dev = dev;
+       sec_pool->pool =  pool;
+
+       INIT_LIST_HEAD(&pool->free_list);
+       INIT_LIST_HEAD(&pool->inuse_list);
+       INIT_LIST_HEAD(&pool->pools);
+       spin_lock_init(&pool->lock);
+       pool->dev = dev;
+       pool->npages_free = pool->npages_in_use = 0;
+       pool->nfrees = 0;
+       pool->gfp_flags = flags;
+       pool->size = PAGE_SIZE;
+       pool->type = type;
+       pool->nrefills = 0;
+       p = pool->name;
+       for (i = 0; i < 5; i++) {
+               if (type & t[i]) {
+                       p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+                                     "%s", n[i]);
+               }
+       }
+       *p = 0;
+       /* We copy the name for pr_ calls b/c when dma_pool_destroy is called
+        * - the kobj->name has already been deallocated.*/
+       snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+                dev_driver_string(dev), dev_name(dev));
+       mutex_lock(&_manager->lock);
+       /* You can get the dma_pool from either the global: */
+       list_add(&sec_pool->pools, &_manager->pools);
+       _manager->npools++;
+       /* or from 'struct device': */
+       list_add(&pool->pools, &dev->dma_pools);
+       mutex_unlock(&_manager->lock);
+
+       *ptr = pool;
+       devres_add(dev, ptr);
+
+       return pool;
+err_mem:
+       devres_free(ptr);
+       kfree(sec_pool);
+       kfree(pool);
+       return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+                                         enum pool_type type)
+{
+       struct dma_pool *pool, *tmp, *found = NULL;
+
+       if (type == IS_UNDEFINED)
+               return found;
+
+       /* NB: We iterate on the 'struct dev' which has no spinlock, but
+        * it does have a kref which we have taken. The kref is taken during
+        * graphic driver loading - in the drm_pci_init it calls either
+        * pci_dev_get or pci_register_driver which both end up taking a kref
+        * on 'struct device'.
+        *
+        * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
+        * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
+        * thing is at that point of time there are no pages associated with the
+        * driver so this function will not be called.
+        */
+       list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+               if (pool->type != type)
+                       continue;
+               found = pool;
+               break;
+       }
+       return found;
+}
+
+/*
+ * Free pages the pages that failed to change the caching state. If there
+ * are pages that have changed their caching state already put them to the
+ * pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+                                                struct list_head *d_pages,
+                                                struct page **failed_pages,
+                                                unsigned cpages)
+{
+       struct dma_page *d_page, *tmp;
+       struct page *p;
+       unsigned i = 0;
+
+       p = failed_pages[0];
+       if (!p)
+               return;
+       /* Find the failed page. */
+       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+               if (d_page->p != p)
+                       continue;
+               /* .. and then progress over the full list. */
+               list_del(&d_page->page_list);
+               __ttm_dma_free_page(pool, d_page);
+               if (++i < cpages)
+                       p = failed_pages[i];
+               else
+                       break;
+       }
+
+}
+
+/*
+ * Allocate 'count' pages, wrapping each in a 'struct dma_page' that is
+ * placed on the 'd_pages' list, with the page caching state set to match
+ * the pool's type.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+                                       struct list_head *d_pages,
+                                       unsigned count)
+{
+       struct page **caching_array;
+       struct dma_page *dma_p;
+       struct page *p;
+       int r = 0;
+       unsigned i, cpages;
+       unsigned max_cpages = min(count,
+                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+       /* allocate array for page caching change */
+       caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+       if (!caching_array) {
+               pr_err(TTM_PFX
+                      "%s: Unable to allocate table for new pages.",
+                       pool->dev_name);
+               return -ENOMEM;
+       }
+
+       if (count > 1) {
+               pr_debug("%s: (%s:%d) Getting %d pages\n",
+                       pool->dev_name, pool->name, current->pid,
+                       count);
+       }
+
+       for (i = 0, cpages = 0; i < count; ++i) {
+               dma_p = __ttm_dma_alloc_page(pool);
+               if (!dma_p) {
+                       pr_err(TTM_PFX "%s: Unable to get page %u.\n",
+                               pool->dev_name, i);
+
+                       /* store already allocated pages in the pool after
+                        * setting the caching state */
+                       if (cpages) {
+                               r = ttm_set_pages_caching(pool, caching_array,
+                                                         cpages);
+                               if (r)
+                                       ttm_dma_handle_caching_state_failure(
+                                               pool, d_pages, caching_array,
+                                               cpages);
+                       }
+                       r = -ENOMEM;
+                       goto out;
+               }
+               p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+               /* gfp flags of highmem page should never be dma32 so we
+                * should be fine in such case
+                */
+               if (!PageHighMem(p))
+#endif
+               {
+                       caching_array[cpages++] = p;
+                       if (cpages == max_cpages) {
+                               /* Note: Cannot hold the spinlock */
+                               r = ttm_set_pages_caching(pool, caching_array,
+                                                cpages);
+                               if (r) {
+                                       ttm_dma_handle_caching_state_failure(
+                                               pool, d_pages, caching_array,
+                                               cpages);
+                                       goto out;
+                               }
+                               cpages = 0;
+                       }
+               }
+               list_add(&dma_p->page_list, d_pages);
+       }
+
+       if (cpages) {
+               r = ttm_set_pages_caching(pool, caching_array, cpages);
+               if (r)
+                       ttm_dma_handle_caching_state_failure(pool, d_pages,
+                                       caching_array, cpages);
+       }
+out:
+       kfree(caching_array);
+       return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+                                        unsigned long *irq_flags)
+{
+       unsigned count = _manager->options.small;
+       int r = pool->npages_free;
+
+       if (count > pool->npages_free) {
+               struct list_head d_pages;
+
+               INIT_LIST_HEAD(&d_pages);
+
+               spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+               /* Returns how many more are necessary to fulfill the
+                * request. */
+               r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+               spin_lock_irqsave(&pool->lock, *irq_flags);
+               if (!r) {
+                       /* Add the fresh pages to the end. */
+                       list_splice(&d_pages, &pool->free_list);
+                       ++pool->nrefills;
+                       pool->npages_free += count;
+                       r = count;
+               } else {
+                       struct dma_page *d_page;
+                       unsigned cpages = 0;
+
+                       pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
+                               pool->dev_name, pool->name, r);
+
+                       list_for_each_entry(d_page, &d_pages, page_list) {
+                               cpages++;
+                       }
+                       list_splice_tail(&d_pages, &pool->free_list);
+                       pool->npages_free += cpages;
+                       r = cpages;
+               }
+       }
+       return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+                                 struct ttm_dma_tt *ttm_dma,
+                                 unsigned index)
+{
+       struct dma_page *d_page;
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       unsigned long irq_flags;
+       int count, r = -ENOMEM;
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+       if (count) {
+               d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+               ttm->pages[index] = d_page->p;
+               ttm_dma->dma_address[index] = d_page->dma;
+               list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+               r = 0;
+               pool->npages_in_use += 1;
+               pool->npages_free -= 1;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       return r;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+       struct dma_pool *pool;
+       enum pool_type type;
+       unsigned i;
+       gfp_t gfp_flags;
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+       if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+               gfp_flags = GFP_USER | GFP_DMA32;
+       else
+               gfp_flags = GFP_HIGHUSER;
+       if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               gfp_flags |= __GFP_ZERO;
+
+       pool = ttm_dma_find_pool(dev, type);
+       if (!pool) {
+               pool = ttm_dma_pool_init(dev, gfp_flags, type);
+               if (IS_ERR_OR_NULL(pool)) {
+                       return -ENOMEM;
+               }
+       }
+
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       for (i = 0; i < ttm->num_pages; ++i) {
+               ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+               if (ret != 0) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return -ENOMEM;
+               }
+
+               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+                                               false, false);
+               if (unlikely(ret != 0)) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return -ENOMEM;
+               }
+       }
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0)) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return ret;
+               }
+       }
+
+       ttm->state = tt_unbound;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get good estimation how many pages are free in pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+       struct device_pools *p;
+       unsigned total = 0;
+
+       mutex_lock(&_manager->lock);
+       list_for_each_entry(p, &_manager->pools, pools)
+               total += p->pool->npages_free;
+       mutex_unlock(&_manager->lock);
+       return total;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       struct dma_pool *pool;
+       struct dma_page *d_page, *next;
+       enum pool_type type;
+       bool is_cached = false;
+       unsigned count = 0, i, npages = 0;
+       unsigned long irq_flags;
+
+       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+       pool = ttm_dma_find_pool(dev, type);
+       if (!pool) {
+               WARN_ON(!pool);
+               return;
+       }
+       is_cached = (ttm_dma_find_pool(pool->dev,
+                    ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+       /* make sure pages array match list and count number of pages */
+       list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+               ttm->pages[count] = d_page->p;
+               count++;
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       pool->npages_in_use -= count;
+       if (is_cached) {
+               pool->nfrees += count;
+       } else {
+               pool->npages_free += count;
+               list_splice(&ttm_dma->pages_list, &pool->free_list);
+               npages = count;
+               if (pool->npages_free > _manager->options.max_size) {
+                       npages = pool->npages_free - _manager->options.max_size;
+                       /* free at least NUM_PAGES_TO_ALLOC number of pages
+                        * to reduce calls to set_memory_wb */
+                       if (npages < NUM_PAGES_TO_ALLOC)
+                               npages = NUM_PAGES_TO_ALLOC;
+               }
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+       if (is_cached) {
+               list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                d_page->p);
+                       ttm_dma_page_put(pool, d_page);
+               }
+       } else {
+               for (i = 0; i < count; i++) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                ttm->pages[i]);
+               }
+       }
+
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       for (i = 0; i < ttm->num_pages; i++) {
+               ttm->pages[i] = NULL;
+               ttm_dma->dma_address[i] = 0;
+       }
+
+       /* shrink pool if necessary (only on !is_cached pools)*/
+       if (npages)
+               ttm_dma_page_pool_free(pool, npages);
+       ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request the pool to reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+                                 struct shrink_control *sc)
+{
+       static atomic_t start_pool = ATOMIC_INIT(0);
+       unsigned idx = 0;
+       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       unsigned shrink_pages = sc->nr_to_scan;
+       struct device_pools *p;
+
+       if (list_empty(&_manager->pools))
+               return 0;
+
+       mutex_lock(&_manager->lock);
+       pool_offset = pool_offset % _manager->npools;
+       list_for_each_entry(p, &_manager->pools, pools) {
+               unsigned nr_free;
+
+               if (!p->dev)
+                       continue;
+               if (shrink_pages == 0)
+                       break;
+               /* Do it in round-robin fashion. */
+               if (++idx < pool_offset)
+                       continue;
+               nr_free = shrink_pages;
+               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+               pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+                       p->pool->dev_name, p->pool->name, current->pid, nr_free,
+                       shrink_pages);
+       }
+       mutex_unlock(&_manager->lock);
+       /* return estimated number of unused pages in pool */
+       return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+       manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+       manager->mm_shrink.seeks = 1;
+       register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+       unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+       int ret = -ENOMEM;
+
+       WARN_ON(_manager);
+
+       printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+
+       _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+       if (!_manager)
+               goto err_manager;
+
+       mutex_init(&_manager->lock);
+       INIT_LIST_HEAD(&_manager->pools);
+
+       _manager->options.max_size = max_pages;
+       _manager->options.small = SMALL_ALLOCATION;
+       _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+       /* This takes care of auto-freeing the _manager */
+       ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+                                  &glob->kobj, "dma_pool");
+       if (unlikely(ret != 0)) {
+               kobject_put(&_manager->kobj);
+               goto err;
+       }
+       ttm_dma_pool_mm_shrink_init(_manager);
+       return 0;
+err_manager:
+       kfree(_manager);
+       _manager = NULL;
+err:
+       return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+       struct device_pools *p, *t;
+
+       printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+       ttm_dma_pool_mm_shrink_fini(_manager);
+
+       list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+               dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+                       current->pid);
+               WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+                       ttm_dma_pool_match, p->pool));
+               ttm_dma_free_pool(p->dev, p->pool->type);
+       }
+       kobject_put(&_manager->kobj);
+       _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       struct device_pools *p;
+       struct dma_pool *pool = NULL;
+       char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+                    "name", "virt", "busaddr"};
+
+       if (!_manager) {
+               seq_printf(m, "No pool allocator running.\n");
+               return 0;
+       }
+       seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+                  h[0], h[1], h[2], h[3], h[4], h[5]);
+       mutex_lock(&_manager->lock);
+       list_for_each_entry(p, &_manager->pools, pools) {
+               struct device *dev = p->dev;
+               if (!dev)
+                       continue;
+               pool = p->pool;
+               seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+                               pool->name, pool->nrefills,
+                               pool->nfrees, pool->npages_in_use,
+                               pool->npages_free,
+                               pool->dev_name);
+       }
+       mutex_unlock(&_manager->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
index f9cc548d6d98cddfe0eccb5dcc154b059162b2d1..2f75d203a2bf259e8b17d71b03f79fe948fc7bc9 100644 (file)
 #include "ttm/ttm_placement.h"
 #include "ttm/ttm_page_alloc.h"
 
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
-       ttm->dma_address = drm_calloc_large(ttm->num_pages,
-                                           sizeof(*ttm->dma_address));
-}
-
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
-{
-       drm_free_large(ttm->pages);
-       ttm->pages = NULL;
-       drm_free_large(ttm->dma_address);
-       ttm->dma_address = NULL;
-}
-
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
-       int write;
-       int dirty;
-       struct page *page;
-       int i;
-       struct ttm_backend *be = ttm->be;
-
-       BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
-       write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
-       dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
-       if (be)
-               be->func->clear(be);
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = ttm->pages[i];
-               if (page == NULL)
-                       continue;
-
-               if (page == ttm->dummy_read_page) {
-                       BUG_ON(write);
-                       continue;
-               }
-
-               if (write && dirty && !PageReserved(page))
-                       set_page_dirty_lock(page);
-
-               ttm->pages[i] = NULL;
-               ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
-               put_page(page);
-       }
-       ttm->state = tt_unpopulated;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
+       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
 }
 
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-       struct page *p;
-       struct list_head h;
-       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-       int ret;
-
-       while (NULL == (p = ttm->pages[index])) {
-
-               INIT_LIST_HEAD(&h);
-
-               ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-                                   &ttm->dma_address[index]);
-
-               if (ret != 0)
-                       return NULL;
-
-               p = list_first_entry(&h, struct page, lru);
-
-               ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-               if (unlikely(ret != 0))
-                       goto out_err;
-
-               if (PageHighMem(p))
-                       ttm->pages[--ttm->first_himem_page] = p;
-               else
-                       ttm->pages[++ttm->last_lomem_page] = p;
-       }
-       return p;
-out_err:
-       put_page(p);
-       return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-       int ret;
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0))
-                       return NULL;
-       }
-       return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
-       struct page *page;
-       unsigned long i;
-       struct ttm_backend *be;
-       int ret;
-
-       if (ttm->state != tt_unpopulated)
-               return 0;
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-
-       be = ttm->be;
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = __ttm_tt_get_page(ttm, i);
-               if (!page)
-                       return -ENOMEM;
-       }
-
-       be->func->populate(be, ttm->num_pages, ttm->pages,
-                          ttm->dummy_read_page, ttm->dma_address);
-       ttm->state = tt_unbound;
-       return 0;
+       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+       ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+                                           sizeof(*ttm->dma_address));
 }
-EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
-       int i;
-       unsigned count = 0;
-       struct list_head h;
-       struct page *cur_page;
-       struct ttm_backend *be = ttm->be;
-
-       INIT_LIST_HEAD(&h);
-
-       if (be)
-               be->func->clear(be);
-       for (i = 0; i < ttm->num_pages; ++i) {
-
-               cur_page = ttm->pages[i];
-               ttm->pages[i] = NULL;
-               if (cur_page) {
-                       if (page_count(cur_page) != 1)
-                               printk(KERN_ERR TTM_PFX
-                                      "Erroneous page count. "
-                                      "Leaking pages.\n");
-                       ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                cur_page);
-                       list_add(&cur_page->lru, &h);
-                       count++;
-               }
-       }
-       ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-                     ttm->dma_address);
-       ttm->state = tt_unpopulated;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
-}
-
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-       struct ttm_backend *be;
-
        if (unlikely(ttm == NULL))
                return;
 
-       be = ttm->be;
-       if (likely(be != NULL)) {
-               be->func->destroy(be);
-               ttm->be = NULL;
+       if (ttm->state == tt_bound) {
+               ttm_tt_unbind(ttm);
        }
 
        if (likely(ttm->pages != NULL)) {
-               if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-                       ttm_tt_free_user_pages(ttm);
-               else
-                       ttm_tt_free_alloced_pages(ttm);
-
-               ttm_tt_free_page_directory(ttm);
+               ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        }
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);
 
-       kfree(ttm);
+       ttm->swap_storage = NULL;
+       ttm->func->destroy(ttm);
 }
 
-int ttm_tt_set_user(struct ttm_tt *ttm,
-                   struct task_struct *tsk,
-                   unsigned long start, unsigned long num_pages)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+               unsigned long size, uint32_t page_flags,
+               struct page *dummy_read_page)
 {
-       struct mm_struct *mm = tsk->mm;
-       int ret;
-       int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
-       BUG_ON(num_pages != ttm->num_pages);
-       BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
-       /**
-        * Account user pages as lowmem pages for now.
-        */
-
-       ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-                                  false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
-       down_read(&mm->mmap_sem);
-       ret = get_user_pages(tsk, mm, start, num_pages,
-                            write, 0, ttm->pages, NULL);
-       up_read(&mm->mmap_sem);
+       ttm->bdev = bdev;
+       ttm->glob = bdev->glob;
+       ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       ttm->caching_state = tt_cached;
+       ttm->page_flags = page_flags;
+       ttm->dummy_read_page = dummy_read_page;
+       ttm->state = tt_unpopulated;
+       ttm->swap_storage = NULL;
 
-       if (ret != num_pages && write) {
-               ttm_tt_free_user_pages(ttm);
-               ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
+       ttm_tt_alloc_page_directory(ttm);
+       if (!ttm->pages) {
+               ttm_tt_destroy(ttm);
+               printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return -ENOMEM;
        }
-
-       ttm->tsk = tsk;
-       ttm->start = start;
-       ttm->state = tt_unbound;
-
        return 0;
 }
+EXPORT_SYMBOL(ttm_tt_init);
 
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-                            uint32_t page_flags, struct page *dummy_read_page)
+void ttm_tt_fini(struct ttm_tt *ttm)
 {
-       struct ttm_bo_driver *bo_driver = bdev->driver;
-       struct ttm_tt *ttm;
-
-       if (!bo_driver)
-               return NULL;
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
 
-       ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
-       if (!ttm)
-               return NULL;
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+               unsigned long size, uint32_t page_flags,
+               struct page *dummy_read_page)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
 
+       ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
-
        ttm->dummy_read_page = dummy_read_page;
+       ttm->state = tt_unpopulated;
+       ttm->swap_storage = NULL;
 
-       ttm_tt_alloc_page_directory(ttm);
-       if (!ttm->pages) {
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       ttm_dma_tt_alloc_page_directory(ttm_dma);
+       if (!ttm->pages || !ttm_dma->dma_address) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
-               return NULL;
-       }
-       ttm->be = bo_driver->create_ttm_backend_entry(bdev);
-       if (!ttm->be) {
-               ttm_tt_destroy(ttm);
-               printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
-               return NULL;
+               return -ENOMEM;
        }
-       ttm->state = tt_unpopulated;
-       return ttm;
+       return 0;
 }
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+       drm_free_large(ttm_dma->dma_address);
+       ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
 
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
        int ret;
-       struct ttm_backend *be = ttm->be;
 
        if (ttm->state == tt_bound) {
-               ret = be->func->unbind(be);
+               ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        int ret = 0;
-       struct ttm_backend *be;
 
        if (!ttm)
                return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
        if (ttm->state == tt_bound)
                return 0;
 
-       be = ttm->be;
-
-       ret = ttm_tt_populate(ttm);
+       ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;
 
-       ret = be->func->bind(be, bo_mem);
+       ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;
 
        ttm->state = tt_bound;
 
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-               ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
 }
 EXPORT_SYMBOL(ttm_tt_bind);
 
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
 {
        struct address_space *swap_space;
        struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        int i;
        int ret = -ENOMEM;
 
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-               ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
-                                     ttm->num_pages);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
-               return 0;
-       }
-
        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);
 
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
-               to_page = __ttm_tt_get_page(ttm, i);
+               to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;
 
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
        return 0;
 out_err:
-       ttm_tt_free_alloced_pages(ttm);
        return ret;
 }
 
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);
 
-       /*
-        * For user buffers, just unpin the pages, as there should be
-        * vma references.
-        */
-
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-               ttm_tt_free_user_pages(ttm);
-               ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
-               ttm->swap_storage = NULL;
-               return 0;
-       }
-
        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                page_cache_release(to_page);
        }
 
-       ttm_tt_free_alloced_pages(ttm);
+       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
index a83e86d3956cb284a04945b988e7844a248d4e01..02661f35f7a057a131244f56759397d447b00176 100644 (file)
 
 #include "drm_pciids.h"
 
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct via_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+       file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+
+       INIT_LIST_HEAD(&file_priv->obj_list);
+
+       return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct via_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
+}
+
 static struct pci_device_id pciidlist[] = {
        viadrv_PCI_IDS
 };
 
+static const struct file_operations via_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
            DRIVER_IRQ_SHARED,
        .load = via_driver_load,
        .unload = via_driver_unload,
+       .open = via_driver_open,
+       .postclose = via_driver_postclose,
        .context_dtor = via_final_context,
        .get_vblank_counter = via_get_vblank_counter,
        .enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
        .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
        .lastclose = via_lastclose,
        .ioctls = via_ioctls,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-               .llseek = noop_llseek,
-               },
-
+       .fops = &via_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 9cf87d9123252c49c00c79a396cbe9a1b836c0a0..88edacc93006f05a22affa0e8a9b282324c4ffa6 100644 (file)
@@ -24,7 +24,7 @@
 #ifndef _VIA_DRV_H_
 #define _VIA_DRV_H_
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 #define DRIVER_AUTHOR  "Various"
 
 #define DRIVER_NAME            "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
        uint32_t irq_pending_mask;
        int *irq_map;
        unsigned int idle_fault;
-       struct drm_sman sman;
        int vram_initialized;
+       struct drm_mm vram_mm;
        int agp_initialized;
+       struct drm_mm agp_mm;
+       /** Mapping of userspace keys to mm objects */
+       struct idr object_idr;
        unsigned long vram_offset;
        unsigned long agp_offset;
        drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
index 6cca9a709f7a25dfc3ba890f070023196450d7b1..a2ab343651518d948d830d237f493b27534bf945 100644 (file)
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev_priv->chipset = chipset;
 
-       ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-       if (ret) {
-               kfree(dev_priv);
-               return ret;
-       }
+       idr_init(&dev->object_name_idr);
 
        ret = drm_vblank_init(dev, 1);
        if (ret) {
-               drm_sman_takedown(&dev_priv->sman);
                kfree(dev_priv);
                return ret;
        }
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
 
-       drm_sman_takedown(&dev_priv->sman);
+       idr_remove_all(&dev_priv->object_idr);
+       idr_destroy(&dev_priv->object_idr);
 
        kfree(dev_priv);
 
index 6cc2dadae3ef06acb3e94ff60da0d968470ac431..a3574d09a07d0c95066f3010a40998f048f728f8 100644 (file)
 #include "drmP.h"
 #include "via_drm.h"
 #include "via_drv.h"
-#include "drm_sman.h"
 
 #define VIA_MM_ALIGN_SHIFT 4
 #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
 
+struct via_memblock {
+       struct drm_mm_node mm_node;
+       struct list_head owner_list;
+};
+
 int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_agp_t *agp = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
-                                agp->size >> VIA_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("AGP memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
 
        dev_priv->agp_initialized = 1;
        dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_fb_t *fb = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
-                                fb->size >> VIA_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("VRAM memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
 
        dev_priv->vram_initialized = 1;
        dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
                return;
 
        mutex_lock(&dev->struct_mutex);
-       drm_sman_cleanup(&dev_priv->sman);
-       dev_priv->vram_initialized = 0;
-       dev_priv->agp_initialized = 0;
+       if (dev_priv->vram_initialized) {
+               drm_mm_takedown(&dev_priv->vram_mm);
+               dev_priv->vram_initialized = 0;
+       }
+       if (dev_priv->agp_initialized) {
+               drm_mm_takedown(&dev_priv->agp_mm);
+               dev_priv->agp_initialized = 0;
+       }
        mutex_unlock(&dev->struct_mutex);
 }
 
 int via_mem_alloc(struct drm_device *dev, void *data,
-                 struct drm_file *file_priv)
+                 struct drm_file *file)
 {
        drm_via_mem_t *mem = data;
-       int retval = 0;
-       struct drm_memblock_item *item;
+       int retval = 0, user_key;
+       struct via_memblock *item;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+       struct via_file_private *file_priv = file->driver_priv;
        unsigned long tmpSize;
 
        if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       item = kzalloc(sizeof(*item), GFP_KERNEL);
+       if (!item) {
+               retval = -ENOMEM;
+               goto fail_alloc;
+       }
+
        tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
-       item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
-                             (unsigned long)file_priv);
-       mutex_unlock(&dev->struct_mutex);
-       if (item) {
-               mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
-                             dev_priv->vram_offset : dev_priv->agp_offset) +
-                   (item->mm->
-                    offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
-               mem->index = item->user_hash.key;
-       } else {
-               mem->offset = 0;
-               mem->size = 0;
-               mem->index = 0;
-               DRM_DEBUG("Video memory allocation failed\n");
+       if (mem->type == VIA_MEM_AGP)
+               retval = drm_mm_insert_node(&dev_priv->agp_mm,
+                                           &item->mm_node,
+                                           tmpSize, 0);
+       else
+               retval = drm_mm_insert_node(&dev_priv->vram_mm,
+                                           &item->mm_node,
+                                           tmpSize, 0);
+       if (retval)
+               goto fail_alloc;
+
+again:
+       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
                retval = -ENOMEM;
+               goto fail_idr;
        }
 
+       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+       if (retval == -EAGAIN)
+               goto again;
+       if (retval)
+               goto fail_idr;
+
+       list_add(&item->owner_list, &file_priv->obj_list);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+                     dev_priv->vram_offset : dev_priv->agp_offset) +
+           ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+       mem->index = user_key;
+
+       return 0;
+
+fail_idr:
+       drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+       kfree(item);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = 0;
+       mem->size = 0;
+       mem->index = 0;
+       DRM_DEBUG("Video memory allocation failed\n");
+
        return retval;
 }
 
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
        drm_via_mem_t *mem = data;
-       int ret;
+       struct via_memblock *obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+       obj = idr_find(&dev_priv->object_idr, mem->index);
+       if (obj == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       idr_remove(&dev_priv->object_idr, mem->index);
+       list_del(&obj->owner_list);
+       drm_mm_remove_node(&obj->mm_node);
+       kfree(obj);
        mutex_unlock(&dev->struct_mutex);
+
        DRM_DEBUG("free = 0x%lx\n", mem->index);
 
-       return ret;
+       return 0;
 }
 
 
 void via_reclaim_buffers_locked(struct drm_device *dev,
-                               struct drm_file *file_priv)
+                               struct drm_file *file)
 {
-       drm_via_private_t *dev_priv = dev->dev_private;
+       struct via_file_private *file_priv = file->driver_priv;
+       struct via_memblock *entry, *next;
 
        mutex_lock(&dev->struct_mutex);
-       if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+       if (list_empty(&file_priv->obj_list)) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
        if (dev->driver->dma_quiescent)
                dev->driver->dma_quiescent(dev);
 
-       drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+       list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+                                owner_list) {
+               list_del(&entry->owner_list);
+               drm_mm_remove_node(&entry->mm_node);
+               kfree(entry);
+       }
        mutex_unlock(&dev->struct_mutex);
        return;
 }
index 5a72ed9082321e92f0da8abe2b67618a1db64919..1e2c0fb7f7869c7ea97a24d6d545bdff62df6537 100644 (file)
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
        .busy_placement = gmr_vram_placement_flags
 };
 
-struct vmw_ttm_backend {
-       struct ttm_backend backend;
-       struct page **pages;
-       unsigned long num_pages;
+struct vmw_ttm_tt {
+       struct ttm_tt ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
 };
 
-static int vmw_ttm_populate(struct ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages,
-                           struct page *dummy_read_page,
-                           dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
-
-       vmw_be->pages = pages;
-       vmw_be->num_pages = num_pages;
-
-       return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
        vmw_be->gmr_id = bo_mem->start;
 
-       return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
-                           vmw_be->num_pages, vmw_be->gmr_id);
+       return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+                           ttm->num_pages, vmw_be->gmr_id);
 }
 
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
 }
 
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-       struct vmw_ttm_backend *vmw_be =
-               container_of(backend, struct vmw_ttm_backend, backend);
-
-       vmw_be->pages = NULL;
-       vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
+       ttm_tt_fini(ttm);
        kfree(vmw_be);
 }
 
 static struct ttm_backend_func vmw_ttm_func = {
-       .populate = vmw_ttm_populate,
-       .clear = vmw_ttm_clear,
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
 };
 
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+                                unsigned long size, uint32_t page_flags,
+                                struct page *dummy_read_page)
 {
-       struct vmw_ttm_backend *vmw_be;
+       struct vmw_ttm_tt *vmw_be;
 
        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;
 
-       vmw_be->backend.func = &vmw_ttm_func;
+       vmw_be->ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-       return &vmw_be->backend;
+       if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(vmw_be);
+               return NULL;
+       }
+
+       return &vmw_be->ttm;
 }
 
 int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
-       .create_ttm_backend_entry = vmw_ttm_backend_init,
+       .ttm_tt_create = &vmw_ttm_tt_create,
+       .ttm_tt_populate = &ttm_pool_populate,
+       .ttm_tt_unpopulate = &ttm_pool_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
index dff8fc7671525d347382261402d60d002a21dbb5..f390f5f9cb684f64fc2e0dbb390a713841f296f2 100644 (file)
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
        .resume = vmw_pm_resume,
 };
 
+static const struct file_operations vmwgfx_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = vmw_unlocked_ioctl,
+       .mmap = vmw_mmap,
+       .poll = vmw_fops_poll,
+       .read = vmw_fops_read,
+       .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = vmw_unlocked_ioctl,
-                .mmap = vmw_mmap,
-                .poll = vmw_fops_poll,
-                .read = vmw_fops_read,
-                .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
-                .compat_ioctl = drm_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
+       .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
index f94b33ae221546a9d9170f3e0eb6b8292776721b..0af6ebdf205d821c6b68c825ea7db1ada77dff42 100644 (file)
@@ -690,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 
        /* XXX get the first 3 from the surface info */
        vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbs->base.base.pitch = mode_cmd->pitch;
+       vfbs->base.base.pitches[0] = mode_cmd->pitch;
        vfbs->base.base.depth = mode_cmd->depth;
        vfbs->base.base.width = mode_cmd->width;
        vfbs->base.base.height = mode_cmd->height;
@@ -804,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
        cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = framebuffer->base.pitch;
+       cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        cmd->body.ptr.gmrId = framebuffer->user_handle;
        cmd->body.ptr.offset = 0;
 
@@ -1056,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
        }
 
        vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbd->base.base.pitch = mode_cmd->pitch;
+       vfbd->base.base.pitches[0] = mode_cmd->pitch;
        vfbd->base.base.depth = mode_cmd->depth;
        vfbd->base.base.width = mode_cmd->width;
        vfbd->base.base.height = mode_cmd->height;
@@ -1085,7 +1085,7 @@ out_err1:
 
 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
-                                                struct drm_mode_fb_cmd *mode_cmd)
+                                                struct drm_mode_fb_cmd2 *mode_cmd2)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1093,8 +1093,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
+       struct drm_mode_fb_cmd mode_cmd;
        int ret;
 
+       mode_cmd.width = mode_cmd2->width;
+       mode_cmd.height = mode_cmd2->height;
+       mode_cmd.pitch = mode_cmd2->pitches[0];
+       mode_cmd.handle = mode_cmd2->handles[0];
+       drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+                                   &mode_cmd.bpp);
+
        /**
         * This code should be conditioned on Screen Objects not being used.
         * If screen objects are used, we can allocate a GMR to hold the
@@ -1102,8 +1110,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         */
 
        if (!vmw_kms_validate_mode_vram(dev_priv,
-                                       mode_cmd->pitch,
-                                       mode_cmd->height)) {
+                                       mode_cmd.pitch,
+                                       mode_cmd.height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
@@ -1117,15 +1125,19 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * command stream using user-space handles.
         */
 
-       user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+       user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
        if (unlikely(user_obj == NULL)) {
                DRM_ERROR("Could not locate requested kms frame buffer.\n");
                return ERR_PTR(-ENOENT);
        }
 
+       /**
+        * End conditioned code.
+        */
+
        /* returns either a dmabuf or surface */
        ret = vmw_user_lookup_handle(dev_priv, tfile,
-                                    mode_cmd->handle,
+                                    mode_cmd.handle,
                                     &surface, &bo);
        if (ret)
                goto err_out;
@@ -1133,10 +1145,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        /* Create the new framebuffer depending one what we got back */
        if (bo)
                ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                                    mode_cmd);
+                                                    &mode_cmd);
        else if (surface)
                ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-                                                     surface, &vfb, mode_cmd);
+                                                     surface, &vfb, &mode_cmd);
        else
                BUG();
 
@@ -1344,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
        cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
        cmd->body.format.colorDepth = vfb->base.depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = vfb->base.pitch;
+       cmd->body.bytesPerLine = vfb->base.pitches[0];
        cmd->body.ptr.gmrId = vfb->user_handle;
        cmd->body.ptr.offset = 0;
 
index e1cb8556355fc625946783c30286ba3fb9388762..a4f7f034996a87bc45d805b82022932e44f35d00 100644 (file)
@@ -29,6 +29,7 @@
 #define VMWGFX_KMS_H_
 
 #include "drmP.h"
+#include "drm_crtc_helper.h"
 #include "vmwgfx_drv.h"
 
 #define VMWGFX_NUM_DISPLAY_UNITS 8
index 8f8dbd43c33d0116a43fb1206d93a4512aca2229..f77b184be80735a0219fad3aa4103851b2ca3f5a 100644 (file)
@@ -95,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                        return 0;
                fb = entry->base.crtc.fb;
 
-               return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+               return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
                                          fb->bits_per_pixel, fb->depth);
        }
 
@@ -103,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                entry = list_entry(lds->active.next, typeof(*entry), active);
                fb = entry->base.crtc.fb;
 
-               vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+               vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
                                   fb->bits_per_pixel, fb->depth);
        }
 
index 1c7f09e268190a5466fdac896e2d617382c8f2d1..a37abb581cbbcbb81f47a0437d78b9a9b123f22a 100644 (file)
@@ -1540,29 +1540,10 @@ out_bad_surface:
 /**
  * Buffer management.
  */
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
-                                 unsigned long num_pages)
-{
-       static size_t bo_user_size = ~0;
-
-       size_t page_array_size =
-           (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
-
-       if (unlikely(bo_user_size == ~0)) {
-               bo_user_size = glob->ttm_bo_extra_size +
-                   ttm_round_pot(sizeof(struct vmw_dma_buffer));
-       }
-
-       return bo_user_size + page_array_size;
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       struct ttm_bo_global *glob = bo->glob;
 
-       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
 }
 
@@ -1573,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    void (*bo_free) (struct ttm_buffer_object *bo))
 {
        struct ttm_bo_device *bdev = &dev_priv->bdev;
-       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;
 
        BUG_ON(!bo_free);
 
-       acc_size =
-           vmw_dmabuf_acc_size(bdev->glob,
-                               (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-       if (unlikely(ret != 0)) {
-               /* we must free the bo here as
-                * ttm_buffer_object_init does so as well */
-               bo_free(&vmw_bo->base);
-               return ret;
-       }
-
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));
 
        INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1605,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-       struct ttm_bo_global *glob = bo->glob;
 
-       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
 }
 
index 22a4a051f221e885d647d6db2e0a0f046074efae..a421abdd1ab7968e696d91f8f1b10ca1ff6a31a2 100644 (file)
@@ -31,6 +31,11 @@ config HID
 
          If unsure, say Y.
 
+config HID_BATTERY_STRENGTH
+       bool
+       depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+       default y
+
 config HIDRAW
        bool "/dev/hidraw raw HID device support"
        depends on HID
@@ -335,6 +340,7 @@ config HID_MULTITOUCH
          Say Y here if you have one of the following devices:
          - 3M PCT touch screens
          - ActionStar dual touch panels
+         - Atmel panels
          - Cando dual touch panels
          - Chunghwa panels
          - CVTouch panels
@@ -349,12 +355,15 @@ config HID_MULTITOUCH
          - Lumio CrystalTouch panels
          - MosArt dual-touch panels
          - PenMount dual touch panels
+         - PixArt optical touch screen
          - Pixcir dual touch panels
+         - Quanta panels
          - eGalax dual-touch panels, including the Joojoo and Wetab tablets
          - Stantum multitouch panels
          - Touch International Panels
          - Unitec Panels
          - XAT optical touch panels
+         - Xiroku optical touch panels
 
          If unsure, say N.
 
@@ -466,12 +475,6 @@ config HID_PRIMAX
        Support for Primax devices that are not fully compliant with the
        HID standard.
 
-config HID_QUANTA
-       tristate "Quanta Optical Touch panels"
-       depends on USB_HID
-       ---help---
-       Support for Quanta Optical Touch dual-touch panels.
-
 config HID_ROCCAT
        tristate "Roccat special event support"
        depends on USB_HID
@@ -492,6 +495,13 @@ config HID_ROCCAT_ARVO
        ---help---
        Support for Roccat Arvo keyboard.
 
+config HID_ROCCAT_ISKU
+       tristate "Roccat Isku keyboard support"
+       depends on USB_HID
+       depends on HID_ROCCAT
+       ---help---
+       Support for Roccat Isku keyboard.
+
 config HID_ROCCAT_KONE
        tristate "Roccat Kone Mouse support"
        depends on USB_HID
@@ -560,6 +570,12 @@ config GREENASIA_FF
        (like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
        and want to enable force feedback support for it.
 
+config HID_HYPERV_MOUSE
+       tristate "Microsoft Hyper-V mouse driver"
+       depends on HYPERV
+       ---help---
+       Select this option to enable the Hyper-V mouse driver.
+
 config HID_SMARTJOYPLUS
        tristate "SmartJoy PLUS PS2/USB adapter support"
        depends on USB_HID
@@ -620,9 +636,19 @@ config HID_WIIMOTE
        depends on BT_HIDP
        depends on LEDS_CLASS
        select POWER_SUPPLY
+       select INPUT_FF_MEMLESS
        ---help---
        Support for the Nintendo Wii Remote bluetooth device.
 
+config HID_WIIMOTE_EXT
+       bool "Nintendo Wii Remote Extension support"
+       depends on HID_WIIMOTE
+       default HID_WIIMOTE
+       ---help---
+       Support for extension controllers of the Nintendo Wii Remote. Say yes
+       here if you want to use the Nintendo Motion+, Nunchuck or Classic
+       extension controllers with your Wii Remote.
+
 config HID_ZEROPLUS
        tristate "Zeroplus based game controller support"
        depends on USB_HID
index 1e0d2a638b28d71bdf0f0340e1e4bab062f17a62..8aefdc963cce319b41838e7655e036a9b8c5a775 100644 (file)
@@ -25,6 +25,14 @@ ifdef CONFIG_LOGIWHEELS_FF
        hid-logitech-y  += hid-lg4ff.o
 endif
 
+hid-wiimote-y          := hid-wiimote-core.o
+ifdef CONFIG_HID_WIIMOTE_EXT
+       hid-wiimote-y   += hid-wiimote-ext.o
+endif
+ifdef CONFIG_DEBUG_FS
+       hid-wiimote-y   += hid-wiimote-debug.o
+endif
+
 obj-$(CONFIG_HID_A4TECH)       += hid-a4tech.o
 obj-$(CONFIG_HID_ACRUX)                += hid-axff.o
 obj-$(CONFIG_HID_APPLE)                += hid-apple.o
@@ -38,6 +46,7 @@ obj-$(CONFIG_HID_ELECOM)      += hid-elecom.o
 obj-$(CONFIG_HID_EZKEY)                += hid-ezkey.o
 obj-$(CONFIG_HID_GYRATION)     += hid-gyration.o
 obj-$(CONFIG_HID_HOLTEK)       += hid-holtekff.o
+obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
 obj-$(CONFIG_HID_KENSINGTON)   += hid-kensington.o
 obj-$(CONFIG_HID_KEYTOUCH)     += hid-keytouch.o
 obj-$(CONFIG_HID_KYE)          += hid-kye.o
@@ -51,7 +60,6 @@ obj-$(CONFIG_HID_MULTITOUCH)  += hid-multitouch.o
 obj-$(CONFIG_HID_NTRIG)                += hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)                += hid-ortek.o
 obj-$(CONFIG_HID_PRODIKEYS)    += hid-prodikeys.o
-obj-$(CONFIG_HID_QUANTA)       += hid-quanta.o
 obj-$(CONFIG_HID_PANTHERLORD)  += hid-pl.o
 obj-$(CONFIG_HID_PETALYNX)     += hid-petalynx.o
 obj-$(CONFIG_HID_PICOLCD)      += hid-picolcd.o
@@ -59,6 +67,7 @@ obj-$(CONFIG_HID_PRIMAX)      += hid-primax.o
 obj-$(CONFIG_HID_ROCCAT)       += hid-roccat.o
 obj-$(CONFIG_HID_ROCCAT_COMMON)        += hid-roccat-common.o
 obj-$(CONFIG_HID_ROCCAT_ARVO)  += hid-roccat-arvo.o
+obj-$(CONFIG_HID_ROCCAT_ISKU)  += hid-roccat-isku.o
 obj-$(CONFIG_HID_ROCCAT_KONE)  += hid-roccat-kone.o
 obj-$(CONFIG_HID_ROCCAT_KONEPLUS)      += hid-roccat-koneplus.o
 obj-$(CONFIG_HID_ROCCAT_KOVAPLUS)      += hid-roccat-kovaplus.o
index af353842f75feaceadeedcc547eeb880519f86df..af08ce7207d972f797d6c2f6d2dbee3c038e0898 100644 (file)
@@ -90,7 +90,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
        struct hid_field *field;
 
        if (report->maxfield == HID_MAX_FIELDS) {
-               dbg_hid("too many fields in report\n");
+               hid_err(report->device, "too many fields in report\n");
                return NULL;
        }
 
@@ -121,7 +121,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
        usage = parser->local.usage[0];
 
        if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
-               dbg_hid("collection stack overflow\n");
+               hid_err(parser->device, "collection stack overflow\n");
                return -1;
        }
 
@@ -129,7 +129,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
                collection = kmalloc(sizeof(struct hid_collection) *
                                parser->device->collection_size * 2, GFP_KERNEL);
                if (collection == NULL) {
-                       dbg_hid("failed to reallocate collection array\n");
+                       hid_err(parser->device, "failed to reallocate collection array\n");
                        return -1;
                }
                memcpy(collection, parser->device->collection,
@@ -165,7 +165,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
 static int close_collection(struct hid_parser *parser)
 {
        if (!parser->collection_stack_ptr) {
-               dbg_hid("collection stack underflow\n");
+               hid_err(parser->device, "collection stack underflow\n");
                return -1;
        }
        parser->collection_stack_ptr--;
@@ -197,7 +197,7 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
 static int hid_add_usage(struct hid_parser *parser, unsigned usage)
 {
        if (parser->local.usage_index >= HID_MAX_USAGES) {
-               dbg_hid("usage index exceeded\n");
+               hid_err(parser->device, "usage index exceeded\n");
                return -1;
        }
        parser->local.usage[parser->local.usage_index] = usage;
@@ -222,12 +222,13 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 
        report = hid_register_report(parser->device, report_type, parser->global.report_id);
        if (!report) {
-               dbg_hid("hid_register_report failed\n");
+               hid_err(parser->device, "hid_register_report failed\n");
                return -1;
        }
 
        if (parser->global.logical_maximum < parser->global.logical_minimum) {
-               dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
+               hid_err(parser->device, "logical range invalid %d %d\n",
+                               parser->global.logical_minimum, parser->global.logical_maximum);
                return -1;
        }
 
@@ -307,7 +308,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
        case HID_GLOBAL_ITEM_TAG_PUSH:
 
                if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
-                       dbg_hid("global environment stack overflow\n");
+                       hid_err(parser->device, "global environment stack overflow\n");
                        return -1;
                }
 
@@ -318,7 +319,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
        case HID_GLOBAL_ITEM_TAG_POP:
 
                if (!parser->global_stack_ptr) {
-                       dbg_hid("global environment stack underflow\n");
+                       hid_err(parser->device, "global environment stack underflow\n");
                        return -1;
                }
 
@@ -362,8 +363,8 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 
        case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
                parser->global.report_size = item_udata(item);
-               if (parser->global.report_size > 32) {
-                       dbg_hid("invalid report_size %d\n",
+               if (parser->global.report_size > 96) {
+                       hid_err(parser->device, "invalid report_size %d\n",
                                        parser->global.report_size);
                        return -1;
                }
@@ -372,7 +373,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
        case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
                parser->global.report_count = item_udata(item);
                if (parser->global.report_count > HID_MAX_USAGES) {
-                       dbg_hid("invalid report_count %d\n",
+                       hid_err(parser->device, "invalid report_count %d\n",
                                        parser->global.report_count);
                        return -1;
                }
@@ -381,13 +382,13 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
        case HID_GLOBAL_ITEM_TAG_REPORT_ID:
                parser->global.report_id = item_udata(item);
                if (parser->global.report_id == 0) {
-                       dbg_hid("report_id 0 is invalid\n");
+                       hid_err(parser->device, "report_id 0 is invalid\n");
                        return -1;
                }
                return 0;
 
        default:
-               dbg_hid("unknown global tag 0x%x\n", item->tag);
+               hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
                return -1;
        }
 }
@@ -414,14 +415,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                         * items and the first delimiter set.
                         */
                        if (parser->local.delimiter_depth != 0) {
-                               dbg_hid("nested delimiters\n");
+                               hid_err(parser->device, "nested delimiters\n");
                                return -1;
                        }
                        parser->local.delimiter_depth++;
                        parser->local.delimiter_branch++;
                } else {
                        if (parser->local.delimiter_depth < 1) {
-                               dbg_hid("bogus close delimiter\n");
+                               hid_err(parser->device, "bogus close delimiter\n");
                                return -1;
                        }
                        parser->local.delimiter_depth--;
@@ -506,7 +507,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
                ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
                break;
        default:
-               dbg_hid("unknown main item tag 0x%x\n", item->tag);
+               hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
                ret = 0;
        }
 
@@ -678,12 +679,12 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
        while ((start = fetch_item(start, end, &item)) != NULL) {
 
                if (item.format != HID_ITEM_FORMAT_SHORT) {
-                       dbg_hid("unexpected long global item\n");
+                       hid_err(device, "unexpected long global item\n");
                        goto err;
                }
 
                if (dispatch_type[item.type](parser, &item)) {
-                       dbg_hid("item %u %u %u %u parsing failed\n",
+                       hid_err(device, "item %u %u %u %u parsing failed\n",
                                item.format, (unsigned)item.size,
                                (unsigned)item.type, (unsigned)item.tag);
                        goto err;
@@ -691,11 +692,11 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
 
                if (start == end) {
                        if (parser->collection_stack_ptr) {
-                               dbg_hid("unbalanced collection at end of report description\n");
+                               hid_err(device, "unbalanced collection at end of report description\n");
                                goto err;
                        }
                        if (parser->local.delimiter_depth) {
-                               dbg_hid("unbalanced delimiter at end of report description\n");
+                               hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
                        vfree(parser);
@@ -703,7 +704,7 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
                }
        }
 
-       dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
+       hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
        vfree(parser);
        return ret;
@@ -873,7 +874,7 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
                ret = hdrv->event(hid, field, usage, value);
                if (ret != 0) {
                        if (ret < 0)
-                               dbg_hid("%s's event failed with %d\n",
+                               hid_err(hid, "%s's event failed with %d\n",
                                                hdrv->name, ret);
                        return;
                }
@@ -995,12 +996,13 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
        hid_dump_input(field->report->device, field->usage + offset, value);
 
        if (offset >= field->report_count) {
-               dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
+               hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
+                               offset, field->report_count);
                return -1;
        }
        if (field->logical_minimum < 0) {
                if (value != snto32(s32ton(value, size), size)) {
-                       dbg_hid("value %d is out of range\n", value);
+                       hid_err(field->report->device, "value %d is out of range\n", value);
                        return -1;
                }
        }
@@ -1157,7 +1159,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
                (id->product == HID_ANY_ID || id->product == hdev->product);
 }
 
-static const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
        for (; id->bus; id++)
@@ -1404,11 +1406,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
        { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
@@ -1423,6 +1427,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1498,11 +1503,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1544,11 +1553,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
@@ -1768,6 +1787,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
index ee80d733801d30cc4abe2d5909e682835013b551..01dd9a7daf7a7f27d58599ee2d1315210239a7da 100644 (file)
@@ -114,6 +114,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
       {0, 0xbd, "FlareRelease"},
       {0, 0xbe, "LandingGear"},
       {0, 0xbf, "ToeBrake"},
+  {  6, 0, "GenericDeviceControls" },
+      {0, 0x20, "BatteryStrength" },
+      {0, 0x21, "WirelessChannel" },
+      {0, 0x22, "WirelessID" },
+      {0, 0x23, "DiscoverWirelessControl" },
+      {0, 0x24, "SecurityCodeCharacterEntered" },
+      {0, 0x25, "SecurityCodeCharacterErased" },
+      {0, 0x26, "SecurityCodeCleared" },
   {  7, 0, "Keyboard" },
   {  8, 0, "LED" },
       {0, 0x01, "NumLock"},
index 9bdde867a02fbad7a6b163d82babb9376dc1f78f..2630d483d2623a08eaf357fe41bef4bd29b7aaf0 100644 (file)
@@ -140,7 +140,7 @@ err:
 }
 
 static const struct hid_device_id ems_devices[] = {
-       { HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ems_devices);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
new file mode 100644 (file)
index 0000000..0c33ae9
--- /dev/null
@@ -0,0 +1,586 @@
+/*
+ *  Copyright (c) 2009, Citrix Systems, Inc.
+ *  Copyright (c) 2010, Microsoft Corporation.
+ *  Copyright (c) 2011, Novell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/hyperv.h>
+
+
+struct hv_input_dev_info {
+       unsigned int size;
+       unsigned short vendor;
+       unsigned short product;
+       unsigned short version;
+       unsigned short reserved[11];
+};
+
+/* The maximum size of a synthetic input message. */
+#define SYNTHHID_MAX_INPUT_REPORT_SIZE 16
+
+/*
+ * Current version
+ *
+ * History:
+ * Beta, RC < 2008/1/22        1,0
+ * RC > 2008/1/22              2,0
+ */
+#define SYNTHHID_INPUT_VERSION_MAJOR   2
+#define SYNTHHID_INPUT_VERSION_MINOR   0
+#define SYNTHHID_INPUT_VERSION         (SYNTHHID_INPUT_VERSION_MINOR | \
+                                        (SYNTHHID_INPUT_VERSION_MAJOR << 16))
+
+
+#pragma pack(push, 1)
+/*
+ * Message types in the synthetic input protocol
+ */
+enum synthhid_msg_type {
+       SYNTH_HID_PROTOCOL_REQUEST,
+       SYNTH_HID_PROTOCOL_RESPONSE,
+       SYNTH_HID_INITIAL_DEVICE_INFO,
+       SYNTH_HID_INITIAL_DEVICE_INFO_ACK,
+       SYNTH_HID_INPUT_REPORT,
+       SYNTH_HID_MAX
+};
+
+/*
+ * Basic message structures.
+ */
+struct synthhid_msg_hdr {
+       enum synthhid_msg_type type;
+       u32 size;
+};
+
+struct synthhid_msg {
+       struct synthhid_msg_hdr header;
+       char data[1]; /* Enclosed message */
+};
+
+union synthhid_version {
+       struct {
+               u16 minor_version;
+               u16 major_version;
+       };
+       u32 version;
+};
+
+/*
+ * Protocol messages
+ */
+struct synthhid_protocol_request {
+       struct synthhid_msg_hdr header;
+       union synthhid_version version_requested;
+};
+
+struct synthhid_protocol_response {
+       struct synthhid_msg_hdr header;
+       union synthhid_version version_requested;
+       unsigned char approved;
+};
+
+struct synthhid_device_info {
+       struct synthhid_msg_hdr header;
+       struct hv_input_dev_info hid_dev_info;
+       struct hid_descriptor hid_descriptor;
+};
+
+struct synthhid_device_info_ack {
+       struct synthhid_msg_hdr header;
+       unsigned char reserved;
+};
+
+struct synthhid_input_report {
+       struct synthhid_msg_hdr header;
+       char buffer[1];
+};
+
+#pragma pack(pop)
+
+#define INPUTVSC_SEND_RING_BUFFER_SIZE         (10*PAGE_SIZE)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE         (10*PAGE_SIZE)
+
+
+enum pipe_prot_msg_type {
+       PIPE_MESSAGE_INVALID,
+       PIPE_MESSAGE_DATA,
+       PIPE_MESSAGE_MAXIMUM
+};
+
+
+struct pipe_prt_msg {
+       enum pipe_prot_msg_type type;
+       u32 size;
+       char data[1];
+};
+
+struct  mousevsc_prt_msg {
+       enum pipe_prot_msg_type type;
+       u32 size;
+       union {
+               struct synthhid_protocol_request request;
+               struct synthhid_protocol_response response;
+               struct synthhid_device_info_ack ack;
+       };
+};
+
+/*
+ * Represents an mousevsc device
+ */
+struct mousevsc_dev {
+       struct hv_device        *device;
+       bool                    init_complete;
+       bool                    connected;
+       struct mousevsc_prt_msg protocol_req;
+       struct mousevsc_prt_msg protocol_resp;
+       /* Synchronize the request/response if needed */
+       struct completion       wait_event;
+       int                     dev_info_status;
+
+       struct hid_descriptor   *hid_desc;
+       unsigned char           *report_desc;
+       u32                     report_desc_size;
+       struct hv_input_dev_info hid_dev_info;
+       struct hid_device       *hid_device;
+};
+
+
+static struct mousevsc_dev *mousevsc_alloc_device(struct hv_device *device)
+{
+       struct mousevsc_dev *input_dev;
+
+       input_dev = kzalloc(sizeof(struct mousevsc_dev), GFP_KERNEL);
+
+       if (!input_dev)
+               return NULL;
+
+       input_dev->device = device;
+       hv_set_drvdata(device, input_dev);
+       init_completion(&input_dev->wait_event);
+       input_dev->init_complete = false;
+
+       return input_dev;
+}
+
+static void mousevsc_free_device(struct mousevsc_dev *device)
+{
+       kfree(device->hid_desc);
+       kfree(device->report_desc);
+       hv_set_drvdata(device->device, NULL);
+       kfree(device);
+}
+
+static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
+                               struct synthhid_device_info *device_info)
+{
+       int ret = 0;
+       struct hid_descriptor *desc;
+       struct mousevsc_prt_msg ack;
+
+       input_device->dev_info_status = -ENOMEM;
+
+       input_device->hid_dev_info = device_info->hid_dev_info;
+       desc = &device_info->hid_descriptor;
+       if (desc->bLength == 0)
+               goto cleanup;
+
+       input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC);
+
+       if (!input_device->hid_desc)
+               goto cleanup;
+
+       memcpy(input_device->hid_desc, desc, desc->bLength);
+
+       input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+       if (input_device->report_desc_size == 0) {
+               input_device->dev_info_status = -EINVAL;
+               goto cleanup;
+       }
+
+       input_device->report_desc = kzalloc(input_device->report_desc_size,
+                                         GFP_ATOMIC);
+
+       if (!input_device->report_desc) {
+               input_device->dev_info_status = -ENOMEM;
+               goto cleanup;
+       }
+
+       memcpy(input_device->report_desc,
+              ((unsigned char *)desc) + desc->bLength,
+              desc->desc[0].wDescriptorLength);
+
+       /* Send the ack */
+       memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
+
+       ack.type = PIPE_MESSAGE_DATA;
+       ack.size = sizeof(struct synthhid_device_info_ack);
+
+       ack.ack.header.type = SYNTH_HID_INITIAL_DEVICE_INFO_ACK;
+       ack.ack.header.size = 1;
+       ack.ack.reserved = 0;
+
+       ret = vmbus_sendpacket(input_device->device->channel,
+                       &ack,
+                       sizeof(struct pipe_prt_msg) - sizeof(unsigned char) +
+                       sizeof(struct synthhid_device_info_ack),
+                       (unsigned long)&ack,
+                       VM_PKT_DATA_INBAND,
+                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+       if (!ret)
+               input_device->dev_info_status = 0;
+
+cleanup:
+       complete(&input_device->wait_event);
+
+       return;
+}
+
+static void mousevsc_on_receive(struct hv_device *device,
+                               struct vmpacket_descriptor *packet)
+{
+       struct pipe_prt_msg *pipe_msg;
+       struct synthhid_msg *hid_msg;
+       struct mousevsc_dev *input_dev = hv_get_drvdata(device);
+       struct synthhid_input_report *input_report;
+
+       pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
+                                               (packet->offset8 << 3));
+
+       if (pipe_msg->type != PIPE_MESSAGE_DATA)
+               return;
+
+       hid_msg = (struct synthhid_msg *)pipe_msg->data;
+
+       switch (hid_msg->header.type) {
+       case SYNTH_HID_PROTOCOL_RESPONSE:
+               /*
+                * While it will be impossible for us to protect against
+                * malicious/buggy hypervisor/host, add a check here to
+                * ensure we don't corrupt memory.
+                */
+               if ((pipe_msg->size + sizeof(struct pipe_prt_msg)
+                       - sizeof(unsigned char))
+                       > sizeof(struct mousevsc_prt_msg)) {
+                       WARN_ON(1);
+                       break;
+               }
+
+               memcpy(&input_dev->protocol_resp, pipe_msg,
+                      pipe_msg->size + sizeof(struct pipe_prt_msg) -
+                      sizeof(unsigned char));
+               complete(&input_dev->wait_event);
+               break;
+
+       case SYNTH_HID_INITIAL_DEVICE_INFO:
+               WARN_ON(pipe_msg->size < sizeof(struct hv_input_dev_info));
+
+               /*
+                * Parse out the device info into device attr,
+                * hid desc and report desc
+                */
+               mousevsc_on_receive_device_info(input_dev,
+                       (struct synthhid_device_info *)pipe_msg->data);
+               break;
+       case SYNTH_HID_INPUT_REPORT:
+               input_report =
+                       (struct synthhid_input_report *)pipe_msg->data;
+               if (!input_dev->init_complete)
+                       break;
+               hid_input_report(input_dev->hid_device,
+                               HID_INPUT_REPORT, input_report->buffer,
+                               input_report->header.size, 1);
+               break;
+       default:
+               pr_err("unsupported hid msg type - type %d len %d",
+                      hid_msg->header.type, hid_msg->header.size);
+               break;
+       }
+
+}
+
+static void mousevsc_on_channel_callback(void *context)
+{
+       const int packet_size = 0x100;
+       int ret;
+       struct hv_device *device = context;
+       u32 bytes_recvd;
+       u64 req_id;
+       struct vmpacket_descriptor *desc;
+       unsigned char   *buffer;
+       int     bufferlen = packet_size;
+
+       buffer = kmalloc(bufferlen, GFP_ATOMIC);
+       if (!buffer)
+               return;
+
+       do {
+               ret = vmbus_recvpacket_raw(device->channel, buffer,
+                                       bufferlen, &bytes_recvd, &req_id);
+
+               switch (ret) {
+               case 0:
+                       if (bytes_recvd <= 0) {
+                               kfree(buffer);
+                               return;
+                       }
+                       desc = (struct vmpacket_descriptor *)buffer;
+
+                       switch (desc->type) {
+                       case VM_PKT_COMP:
+                               break;
+
+                       case VM_PKT_DATA_INBAND:
+                               mousevsc_on_receive(device, desc);
+                               break;
+
+                       default:
+                               pr_err("unhandled packet type %d, tid %llx len %d\n",
+                                       desc->type, req_id, bytes_recvd);
+                               break;
+                       }
+
+                       break;
+
+               case -ENOBUFS:
+                       kfree(buffer);
+                       /* Handle large packet */
+                       bufferlen = bytes_recvd;
+                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+
+                       if (!buffer)
+                               return;
+
+                       break;
+               }
+       } while (1);
+
+}
+
+static int mousevsc_connect_to_vsp(struct hv_device *device)
+{
+       int ret = 0;
+       int t;
+       struct mousevsc_dev *input_dev = hv_get_drvdata(device);
+       struct mousevsc_prt_msg *request;
+       struct mousevsc_prt_msg *response;
+
+       request = &input_dev->protocol_req;
+       memset(request, 0, sizeof(struct mousevsc_prt_msg));
+
+       request->type = PIPE_MESSAGE_DATA;
+       request->size = sizeof(struct synthhid_protocol_request);
+       request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
+       request->request.header.size = sizeof(unsigned int);
+       request->request.version_requested.version = SYNTHHID_INPUT_VERSION;
+
+       ret = vmbus_sendpacket(device->channel, request,
+                               sizeof(struct pipe_prt_msg) -
+                               sizeof(unsigned char) +
+                               sizeof(struct synthhid_protocol_request),
+                               (unsigned long)request,
+                               VM_PKT_DATA_INBAND,
+                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+       if (ret)
+               goto cleanup;
+
+       t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
+       if (!t) {
+               ret = -ETIMEDOUT;
+               goto cleanup;
+       }
+
+       response = &input_dev->protocol_resp;
+
+       if (!response->response.approved) {
+               pr_err("synthhid protocol request failed (version %d)\n",
+                      SYNTHHID_INPUT_VERSION);
+               ret = -ENODEV;
+               goto cleanup;
+       }
+
+       t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
+       if (!t) {
+               ret = -ETIMEDOUT;
+               goto cleanup;
+       }
+
+       /*
+        * We should have gotten the device attr, hid desc and report
+        * desc at this point
+        */
+       ret = input_dev->dev_info_status;
+
+cleanup:
+       return ret;
+}
+
+static int mousevsc_hid_open(struct hid_device *hid)
+{
+       return 0;
+}
+
+static int mousevsc_hid_start(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void mousevsc_hid_close(struct hid_device *hid)
+{
+}
+
+static void mousevsc_hid_stop(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver mousevsc_ll_driver = {
+       .open = mousevsc_hid_open,
+       .close = mousevsc_hid_close,
+       .start = mousevsc_hid_start,
+       .stop = mousevsc_hid_stop,
+};
+
+static struct hid_driver mousevsc_hid_driver;
+
+static int mousevsc_probe(struct hv_device *device,
+                       const struct hv_vmbus_device_id *dev_id)
+{
+       int ret;
+       struct mousevsc_dev *input_dev;
+       struct hid_device *hid_dev;
+
+       input_dev = mousevsc_alloc_device(device);
+
+       if (!input_dev)
+               return -ENOMEM;
+
+       ret = vmbus_open(device->channel,
+               INPUTVSC_SEND_RING_BUFFER_SIZE,
+               INPUTVSC_RECV_RING_BUFFER_SIZE,
+               NULL,
+               0,
+               mousevsc_on_channel_callback,
+               device
+               );
+
+       if (ret)
+               goto probe_err0;
+
+       ret = mousevsc_connect_to_vsp(device);
+
+       if (ret)
+               goto probe_err1;
+
+       /* workaround SA-167 */
+       if (input_dev->report_desc[14] == 0x25)
+               input_dev->report_desc[14] = 0x29;
+
+       hid_dev = hid_allocate_device();
+       if (IS_ERR(hid_dev)) {
+               ret = PTR_ERR(hid_dev);
+               goto probe_err1;
+       }
+
+       hid_dev->ll_driver = &mousevsc_ll_driver;
+       hid_dev->driver = &mousevsc_hid_driver;
+       hid_dev->bus = BUS_VIRTUAL;
+       hid_dev->vendor = input_dev->hid_dev_info.vendor;
+       hid_dev->product = input_dev->hid_dev_info.product;
+       hid_dev->version = input_dev->hid_dev_info.version;
+       input_dev->hid_device = hid_dev;
+
+       sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
+
+       ret = hid_add_device(hid_dev);
+       if (ret)
+               goto probe_err1;
+
+       ret = hid_parse_report(hid_dev, input_dev->report_desc,
+                               input_dev->report_desc_size);
+
+       if (ret) {
+               hid_err(hid_dev, "parse failed\n");
+               goto probe_err2;
+       }
+
+       ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+
+       if (ret) {
+               hid_err(hid_dev, "hw start failed\n");
+               goto probe_err2;
+       }
+
+       input_dev->connected = true;
+       input_dev->init_complete = true;
+
+       return ret;
+
+probe_err2:
+       hid_destroy_device(hid_dev);
+
+probe_err1:
+       vmbus_close(device->channel);
+
+probe_err0:
+       mousevsc_free_device(input_dev);
+
+       return ret;
+}
+
+
+static int mousevsc_remove(struct hv_device *dev)
+{
+       struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
+
+       vmbus_close(dev->channel);
+       hid_destroy_device(input_dev->hid_device);
+       mousevsc_free_device(input_dev);
+
+       return 0;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+       /* Mouse guid */
+       { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
+                      0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+       { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct  hv_driver mousevsc_drv = {
+       .name = KBUILD_MODNAME,
+       .id_table = id_table,
+       .probe = mousevsc_probe,
+       .remove = mousevsc_remove,
+};
+
+static int __init mousevsc_init(void)
+{
+       return vmbus_driver_register(&mousevsc_drv);
+}
+
+static void __exit mousevsc_exit(void)
+{
+       vmbus_driver_unregister(&mousevsc_drv);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HV_DRV_VERSION);
+module_init(mousevsc_init);
+module_exit(mousevsc_exit);
index 4a441a6f996748a923204fbcb53c4850eb3af061..b8574cddd95352a360ef7ff67200527185b3c1fb 100644 (file)
@@ -21,6 +21,7 @@
 #define USB_VENDOR_ID_3M               0x0596
 #define USB_DEVICE_ID_3M1968           0x0500
 #define USB_DEVICE_ID_3M2256           0x0502
+#define USB_DEVICE_ID_3M3266           0x0506
 
 #define USB_VENDOR_ID_A4TECH           0x09da
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI  0x0255
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY   0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY    0x030b
 #define USB_DEVICE_ID_ATEN_4PORTKVM    0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC   0x2208
 
+#define USB_VENDOR_ID_ATMEL            0x03eb
+#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
+
 #define USB_VENDOR_ID_AVERMEDIA                0x07ca
 #define USB_DEVICE_ID_AVER_FM_MR800    0xb800
 
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER   0x0001
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH   0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1  0x720c
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2  0x72a1
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3  0x480e
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4  0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D      0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E      0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C      0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B      0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1      0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA      0x72fa
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302      0x7302
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001      0xa001
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 #define USB_VENDOR_ID_HANVON           0x20b3
 #define USB_DEVICE_ID_HANVON_MULTITOUCH        0x0a18
 
+#define USB_VENDOR_ID_HANVON_ALT       0x22ed
+#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH    0x1010
+
 #define USB_VENDOR_ID_HAPP             0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING     0x0010
 #define USB_DEVICE_ID_UGCI_FLYING      0x0020
 #define USB_VENDOR_ID_PI_ENGINEERING   0x05f3
 #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
 
+#define USB_VENDOR_ID_PIXART                           0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN      0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1     0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2     0x8003
+
 #define USB_VENDOR_ID_PLAYDOTCOM       0x0b43
 #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII     0x0003
 
 #define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
 
 #define USB_VENDOR_ID_QUANTA           0x0408
-#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH     0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH             0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001                0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN  0x3001
 
 #define USB_VENDOR_ID_ROCCAT           0x1e7d
 #define USB_DEVICE_ID_ROCCAT_ARVO      0x30d4
+#define USB_DEVICE_ID_ROCCAT_ISKU      0x319c
 #define USB_DEVICE_ID_ROCCAT_KONE      0x2ced
 #define USB_DEVICE_ID_ROCCAT_KONEPLUS  0x2d51
 #define USB_DEVICE_ID_ROCCAT_KOVAPLUS  0x2d50
 
 #define USB_VENDOR_ID_WACOM            0x056a
 #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH   0x00BD
 
 #define USB_VENDOR_ID_WALTOP                           0x172f
 #define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH      0x0032
 #define USB_VENDOR_ID_XAT      0x2505
 #define USB_DEVICE_ID_XAT_CSR  0x0220
 
+#define USB_VENDOR_ID_XIROKU           0x1477
+#define USB_DEVICE_ID_XIROKU_SPX       0x1006
+#define USB_DEVICE_ID_XIROKU_MPX       0x1007
+#define USB_DEVICE_ID_XIROKU_CSR       0x100e
+#define USB_DEVICE_ID_XIROKU_SPX1      0x1021
+#define USB_DEVICE_ID_XIROKU_CSR1      0x1022
+#define USB_DEVICE_ID_XIROKU_MPX1      0x1023
+#define USB_DEVICE_ID_XIROKU_SPX2      0x1024
+#define USB_DEVICE_ID_XIROKU_CSR2      0x1025
+#define USB_DEVICE_ID_XIROKU_MPX2      0x1026
+
 #define USB_VENDOR_ID_YEALINK          0x6993
 #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K      0xb001
 
index f333139d1a4823b325f4ab6c5063cf926198ce26..9333d692a786b03df115ebe87d920e748b607dc6 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/hid.h>
 #include <linux/hid-debug.h>
 
+#include "hid-ids.h"
+
 #define unk    KEY_UNKNOWN
 
 static const unsigned char hid_keyboard[256] = {
@@ -271,6 +273,161 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
        return logical_extents / physical_extents;
 }
 
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+static enum power_supply_property hidinput_battery_props[] = {
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_MODEL_NAME,
+       POWER_SUPPLY_PROP_STATUS
+};
+
+#define HID_BATTERY_QUIRK_PERCENT      (1 << 0) /* always reports percent */
+#define HID_BATTERY_QUIRK_FEATURE      (1 << 1) /* ask for feature report */
+
+static const struct hid_device_id hid_battery_quirks[] = {
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       {}
+};
+
+static unsigned find_battery_quirk(struct hid_device *hdev)
+{
+       unsigned quirks = 0;
+       const struct hid_device_id *match;
+
+       match = hid_match_id(hdev, hid_battery_quirks);
+       if (match != NULL)
+               quirks = match->driver_data;
+
+       return quirks;
+}
+
+static int hidinput_get_battery_property(struct power_supply *psy,
+                                        enum power_supply_property prop,
+                                        union power_supply_propval *val)
+{
+       struct hid_device *dev = container_of(psy, struct hid_device, battery);
+       int ret = 0;
+       __u8 buf[2] = {};
+
+       switch (prop) {
+       case POWER_SUPPLY_PROP_PRESENT:
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = 1;
+               break;
+
+       case POWER_SUPPLY_PROP_CAPACITY:
+               ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
+                                             buf, sizeof(buf),
+                                             dev->battery_report_type);
+
+               if (ret != 2) {
+                       if (ret >= 0)
+                               ret = -EINVAL;
+                       break;
+               }
+
+               if (dev->battery_min < dev->battery_max &&
+                   buf[1] >= dev->battery_min &&
+                   buf[1] <= dev->battery_max)
+                       val->intval = (100 * (buf[1] - dev->battery_min)) /
+                               (dev->battery_max - dev->battery_min);
+               break;
+
+       case POWER_SUPPLY_PROP_MODEL_NAME:
+               val->strval = dev->name;
+               break;
+
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+{
+       struct power_supply *battery = &dev->battery;
+       int ret;
+       unsigned quirks;
+       s32 min, max;
+
+       if (field->usage->hid != HID_DC_BATTERYSTRENGTH)
+               return false;   /* no match */
+
+       if (battery->name != NULL)
+               goto out;       /* already initialized? */
+
+       battery->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq);
+       if (battery->name == NULL)
+               goto out;
+
+       battery->type = POWER_SUPPLY_TYPE_BATTERY;
+       battery->properties = hidinput_battery_props;
+       battery->num_properties = ARRAY_SIZE(hidinput_battery_props);
+       battery->use_for_apm = 0;
+       battery->get_property = hidinput_get_battery_property;
+
+       quirks = find_battery_quirk(dev);
+
+       hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
+               dev->bus, dev->vendor, dev->product, dev->version, quirks);
+
+       min = field->logical_minimum;
+       max = field->logical_maximum;
+
+       if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+               min = 0;
+               max = 100;
+       }
+
+       if (quirks & HID_BATTERY_QUIRK_FEATURE)
+               report_type = HID_FEATURE_REPORT;
+
+       dev->battery_min = min;
+       dev->battery_max = max;
+       dev->battery_report_type = report_type;
+       dev->battery_report_id = field->report->id;
+
+       ret = power_supply_register(&dev->dev, battery);
+       if (ret != 0) {
+               hid_warn(dev, "can't register power supply: %d\n", ret);
+               kfree(battery->name);
+               battery->name = NULL;
+       }
+
+out:
+       return true;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+       if (!dev->battery.name)
+               return;
+
+       power_supply_unregister(&dev->battery);
+       kfree(dev->battery.name);
+       dev->battery.name = NULL;
+}
+#else  /* !CONFIG_HID_BATTERY_STRENGTH */
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+                                  struct hid_field *field)
+{
+       return false;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+}
+#endif /* CONFIG_HID_BATTERY_STRENGTH */
+
 static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
                                     struct hid_usage *usage)
 {
@@ -629,6 +786,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                }
                break;
 
+       case HID_UP_GENDEVCTRLS:
+               if (hidinput_setup_battery(device, HID_INPUT_REPORT, field))
+                       goto ignore;
+               else
+                       goto unknown;
+               break;
+
        case HID_UP_HPVENDOR:   /* Reported on a Dutch layout HP5308 */
                set_bit(EV_REP, input->evbit);
                switch (usage->hid & HID_USAGE) {
@@ -822,6 +986,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
                return;
        }
 
+       /* Ignore out-of-range values as per HID specification, section 5.10 */
+       if (value < field->logical_minimum || value > field->logical_maximum) {
+               dbg_hid("Ignoring out-of-range value %x\n", value);
+               return;
+       }
+
        /* report the usage code as scancode if the key status has changed */
        if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
                input_event(input, EV_MSC, MSC_SCAN, usage->hid);
@@ -861,6 +1031,48 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL_GPL(hidinput_find_field);
 
+struct hid_field *hidinput_get_led_field(struct hid_device *hid)
+{
+       struct hid_report *report;
+       struct hid_field *field;
+       int i, j;
+
+       list_for_each_entry(report,
+                           &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+                           list) {
+               for (i = 0; i < report->maxfield; i++) {
+                       field = report->field[i];
+                       for (j = 0; j < field->maxusage; j++)
+                               if (field->usage[j].type == EV_LED)
+                                       return field;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(hidinput_get_led_field);
+
+unsigned int hidinput_count_leds(struct hid_device *hid)
+{
+       struct hid_report *report;
+       struct hid_field *field;
+       int i, j;
+       unsigned int count = 0;
+
+       list_for_each_entry(report,
+                           &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+                           list) {
+               for (i = 0; i < report->maxfield; i++) {
+                       field = report->field[i];
+                       for (j = 0; j < field->maxusage; j++)
+                               if (field->usage[j].type == EV_LED &&
+                                   field->value[j])
+                                       count += 1;
+               }
+       }
+       return count;
+}
+EXPORT_SYMBOL_GPL(hidinput_count_leds);
+
 static int hidinput_open(struct input_dev *dev)
 {
        struct hid_device *hid = input_get_drvdata(dev);
@@ -882,15 +1094,17 @@ static void report_features(struct hid_device *hid)
        struct hid_report *rep;
        int i, j;
 
-       if (!drv->feature_mapping)
-               return;
-
        rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
        list_for_each_entry(rep, &rep_enum->report_list, list)
                for (i = 0; i < rep->maxfield; i++)
-                       for (j = 0; j < rep->field[i]->maxusage; j++)
-                               drv->feature_mapping(hid, rep->field[i],
-                                                    rep->field[i]->usage + j);
+                       for (j = 0; j < rep->field[i]->maxusage; j++) {
+                               /* Verify if Battery Strength feature is available */
+                               hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
+
+                               if (drv->feature_mapping)
+                                       drv->feature_mapping(hid, rep->field[i],
+                                                            rep->field[i]->usage + j);
+                       }
 }
 
 /*
@@ -1010,6 +1224,8 @@ void hidinput_disconnect(struct hid_device *hid)
 {
        struct hid_input *hidinput, *next;
 
+       hidinput_cleanup_battery(hid);
+
        list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
                list_del(&hidinput->list);
                input_unregister_device(hidinput->input);
index 103f30d93f76adb154518f5656bba6bbd98e24e5..6ecc9e2204409c1451e90086a4edd7f0639cdb01 100644 (file)
@@ -430,7 +430,7 @@ int lg4ff_init(struct hid_device *hid)
        }
 
        /* Add the device to device_list */
-       entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+       entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
        if (!entry) {
                hid_err(hid, "Cannot add device, insufficient memory.\n");
                return -ENOMEM;
index f1c909f1b23998f91d9b7b76d8cf31e53f65aadd..24fc4423b9373f749dab0f743f62692280d7db24 100644 (file)
@@ -50,7 +50,6 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_ALWAYS_VALID          (1 << 4)
 #define MT_QUIRK_VALID_IS_INRANGE      (1 << 5)
 #define MT_QUIRK_VALID_IS_CONFIDENCE   (1 << 6)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP      (1 << 7)
 #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE   (1 << 8)
 
 struct mt_slot {
@@ -60,9 +59,19 @@ struct mt_slot {
        bool seen_in_this_frame;/* has this slot been updated */
 };
 
+struct mt_class {
+       __s32 name;     /* MT_CLS */
+       __s32 quirks;
+       __s32 sn_move;  /* Signal/noise ratio for move events */
+       __s32 sn_width; /* Signal/noise ratio for width events */
+       __s32 sn_height;        /* Signal/noise ratio for height events */
+       __s32 sn_pressure;      /* Signal/noise ratio for pressure events */
+       __u8 maxcontacts;
+};
+
 struct mt_device {
        struct mt_slot curdata; /* placeholder of incoming data */
-       struct mt_class *mtclass;       /* our mt device class */
+       struct mt_class mtclass;        /* our mt device class */
        unsigned last_field_index;      /* last field index of the report */
        unsigned last_slot_field;       /* the last field of a slot */
        int last_mt_collection; /* last known mt-related collection */
@@ -74,30 +83,23 @@ struct mt_device {
        struct mt_slot *slots;
 };
 
-struct mt_class {
-       __s32 name;     /* MT_CLS */
-       __s32 quirks;
-       __s32 sn_move;  /* Signal/noise ratio for move events */
-       __s32 sn_width; /* Signal/noise ratio for width events */
-       __s32 sn_height;        /* Signal/noise ratio for height events */
-       __s32 sn_pressure;      /* Signal/noise ratio for pressure events */
-       __u8 maxcontacts;
-};
-
 /* classes of device behavior */
 #define MT_CLS_DEFAULT                         0x0001
 
 #define MT_CLS_SERIAL                          0x0002
 #define MT_CLS_CONFIDENCE                      0x0003
-#define MT_CLS_CONFIDENCE_MINUS_ONE            0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTID          0x0005
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER      0x0006
-#define MT_CLS_DUAL_NSMU_CONTACTID             0x0007
+#define MT_CLS_CONFIDENCE_CONTACT_ID           0x0004
+#define MT_CLS_CONFIDENCE_MINUS_ONE            0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTID          0x0006
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER      0x0007
+#define MT_CLS_DUAL_NSMU_CONTACTID             0x0008
+#define MT_CLS_INRANGE_CONTACTNUMBER           0x0009
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
 #define MT_CLS_CYPRESS                         0x0102
 #define MT_CLS_EGALAX                          0x0103
+#define MT_CLS_EGALAX_SERIAL                   0x0104
 
 #define MT_DEFAULT_MAXCONTACT  10
 
@@ -133,13 +135,16 @@ static int find_slot_from_contactid(struct mt_device *td)
        return -1;
 }
 
-struct mt_class mt_classes[] = {
+static struct mt_class mt_classes[] = {
        { .name = MT_CLS_DEFAULT,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
        { .name = MT_CLS_SERIAL,
                .quirks = MT_QUIRK_ALWAYS_VALID},
        { .name = MT_CLS_CONFIDENCE,
                .quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
+       { .name = MT_CLS_CONFIDENCE_CONTACT_ID,
+               .quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
+                       MT_QUIRK_SLOT_IS_CONTACTID },
        { .name = MT_CLS_CONFIDENCE_MINUS_ONE,
                .quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
                        MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE },
@@ -155,6 +160,9 @@ struct mt_class mt_classes[] = {
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
                        MT_QUIRK_SLOT_IS_CONTACTID,
                .maxcontacts = 2 },
+       { .name = MT_CLS_INRANGE_CONTACTNUMBER,
+               .quirks = MT_QUIRK_VALID_IS_INRANGE |
+                       MT_QUIRK_SLOT_IS_CONTACTNUMBER },
 
        /*
         * vendor specific classes
@@ -171,9 +179,13 @@ struct mt_class mt_classes[] = {
                .maxcontacts = 10 },
        { .name = MT_CLS_EGALAX,
                .quirks =  MT_QUIRK_SLOT_IS_CONTACTID |
-                       MT_QUIRK_VALID_IS_INRANGE |
-                       MT_QUIRK_EGALAX_XYZ_FIXUP,
-               .maxcontacts = 2,
+                       MT_QUIRK_VALID_IS_INRANGE,
+               .sn_move = 4096,
+               .sn_pressure = 32,
+       },
+       { .name = MT_CLS_EGALAX_SERIAL,
+               .quirks =  MT_QUIRK_SLOT_IS_CONTACTID |
+                       MT_QUIRK_ALWAYS_VALID,
                .sn_move = 4096,
                .sn_pressure = 32,
        },
@@ -181,6 +193,44 @@ struct mt_class mt_classes[] = {
        { }
 };
 
+static ssize_t mt_show_quirks(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct mt_device *td = hid_get_drvdata(hdev);
+
+       return sprintf(buf, "%u\n", td->mtclass.quirks);
+}
+
+static ssize_t mt_set_quirks(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+       struct mt_device *td = hid_get_drvdata(hdev);
+
+       unsigned long val;
+
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       td->mtclass.quirks = val;
+
+       return count;
+}
+
+static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks);
+
+static struct attribute *sysfs_attrs[] = {
+       &dev_attr_quirks.attr,
+       NULL
+};
+
+static struct attribute_group mt_attribute_group = {
+       .attrs = sysfs_attrs
+};
+
 static void mt_feature_mapping(struct hid_device *hdev,
                struct hid_field *field, struct hid_usage *usage)
 {
@@ -192,9 +242,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
                break;
        case HID_DG_CONTACTMAX:
                td->maxcontacts = field->value[0];
-               if (td->mtclass->maxcontacts)
+               if (td->mtclass.maxcontacts)
                        /* check if the maxcontacts is given by the class */
-                       td->maxcontacts = td->mtclass->maxcontacts;
+                       td->maxcontacts = td->mtclass.maxcontacts;
 
                break;
        }
@@ -214,8 +264,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                unsigned long **bit, int *max)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
-       struct mt_class *cls = td->mtclass;
-       __s32 quirks = cls->quirks;
+       struct mt_class *cls = &td->mtclass;
 
        /* Only map fields from TouchScreen or TouchPad collections.
          * We need to ignore fields that belong to other collections
@@ -227,13 +276,17 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        else
                return 0;
 
+       /* eGalax devices provide a Digitizer.Stylus input which overrides
+        * the correct Digitizers.Finger X/Y ranges.
+        * Let's just ignore this input. */
+       if (field->physical == HID_DG_STYLUS)
+               return -1;
+
        switch (usage->hid & HID_USAGE_PAGE) {
 
        case HID_UP_GENDESK:
                switch (usage->hid) {
                case HID_GD_X:
-                       if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
-                               field->logical_maximum = 32760;
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_POSITION_X);
                        set_abs(hi->input, ABS_MT_POSITION_X, field,
@@ -246,8 +299,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        }
                        return 1;
                case HID_GD_Y:
-                       if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
-                               field->logical_maximum = 32760;
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_POSITION_Y);
                        set_abs(hi->input, ABS_MT_POSITION_Y, field,
@@ -315,8 +366,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        }
                        return 1;
                case HID_DG_TIPPRESSURE:
-                       if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
-                               field->logical_minimum = 0;
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_PRESSURE);
                        set_abs(hi->input, ABS_MT_PRESSURE, field,
@@ -363,7 +412,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 
 static int mt_compute_slot(struct mt_device *td)
 {
-       __s32 quirks = td->mtclass->quirks;
+       __s32 quirks = td->mtclass.quirks;
 
        if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
                return td->curdata.contactid;
@@ -407,7 +456,7 @@ static void mt_emit_event(struct mt_device *td, struct input_dev *input)
 
        for (i = 0; i < td->maxcontacts; ++i) {
                struct mt_slot *s = &(td->slots[i]);
-               if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+               if ((td->mtclass.quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
                        !s->seen_in_this_frame) {
                        s->touch_state = false;
                }
@@ -444,7 +493,7 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
                                struct hid_usage *usage, __s32 value)
 {
        struct mt_device *td = hid_get_drvdata(hid);
-       __s32 quirks = td->mtclass->quirks;
+       __s32 quirks = td->mtclass.quirks;
 
        if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
                switch (usage->hid) {
@@ -552,7 +601,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
                dev_err(&hdev->dev, "cannot allocate multitouch data\n");
                return -ENOMEM;
        }
-       td->mtclass = mtclass;
+       td->mtclass = *mtclass;
        td->inputmode = -1;
        td->last_mt_collection = -1;
        hid_set_drvdata(hdev, td);
@@ -574,6 +623,8 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto fail;
        }
 
+       ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+
        mt_set_input_mode(hdev);
 
        return 0;
@@ -594,6 +645,7 @@ static int mt_reset_resume(struct hid_device *hdev)
 static void mt_remove(struct hid_device *hdev)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
+       sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
        hid_hw_stop(hdev);
        kfree(td->slots);
        kfree(td);
@@ -609,12 +661,20 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_3M,
                HID_USB_DEVICE(USB_VENDOR_ID_3M,
                        USB_DEVICE_ID_3M2256) },
+       { .driver_data = MT_CLS_3M,
+               HID_USB_DEVICE(USB_VENDOR_ID_3M,
+                       USB_DEVICE_ID_3M3266) },
 
        /* ActionStar panels */
        { .driver_data = MT_CLS_DEFAULT,
                HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
                        USB_DEVICE_ID_ACTIONSTAR_1011) },
 
+       /* Atmel panels */
+       { .driver_data = MT_CLS_SERIAL,
+               HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
+                       USB_DEVICE_ID_ATMEL_MULTITOUCH) },
+
        /* Cando panels */
        { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
                HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
@@ -645,23 +705,32 @@ static const struct hid_device_id mt_devices[] = {
                        USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
 
        /* eGalax devices (resistive) */
-       {  .driver_data = MT_CLS_EGALAX,
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
 
        /* eGalax devices (capacitive) */
-       {  .driver_data = MT_CLS_EGALAX,
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+       { .driver_data = MT_CLS_EGALAX,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
+       { .driver_data = MT_CLS_EGALAX,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+       { .driver_data = MT_CLS_EGALAX_SERIAL,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
 
        /* Elo TouchSystems IntelliTouch Plus panel */
        { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
@@ -678,6 +747,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
                        USB_DEVICE_ID_GOODTOUCH_000f) },
 
+       /* Hanvon panels */
+       { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+               HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+                       USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+
        /* Ideacom panel */
        { .driver_data = MT_CLS_SERIAL,
                HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
@@ -722,6 +796,17 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
                        USB_DEVICE_ID_PENMOUNT_PCI) },
 
+       /* PixArt optical touch screen */
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+       { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+               HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+                       USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+
        /* PixCir-based panels */
        { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
                HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
@@ -730,6 +815,17 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
                        USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
 
+       /* Quanta-based panels */
+       { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+               HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+                       USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+       { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+               HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+                       USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
+       { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+               HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+                       USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+
        /* Stantum panels */
        { .driver_data = MT_CLS_CONFIDENCE,
                HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
@@ -758,6 +854,35 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_XAT,
                        USB_DEVICE_ID_XAT_CSR) },
 
+       /* Xiroku */
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_SPX) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_MPX) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_CSR) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_SPX1) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_MPX1) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_CSR1) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_SPX2) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_MPX2) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+                       USB_DEVICE_ID_XIROKU_CSR2) },
+
        { }
 };
 MODULE_DEVICE_TABLE(hid, mt_devices);
index 01e7d2cd7c26a55170d2f7eda4e0a83cb37cb55d..12f9777c385d4038825d2fddcd59a18182e13b3c 100644 (file)
@@ -633,7 +633,7 @@ struct picolcd_fb_cleanup_item {
        struct picolcd_fb_cleanup_item *next;
 };
 static struct picolcd_fb_cleanup_item *fb_pending;
-DEFINE_SPINLOCK(fb_pending_lock);
+static DEFINE_SPINLOCK(fb_pending_lock);
 
 static void picolcd_fb_do_cleanup(struct work_struct *data)
 {
@@ -658,7 +658,7 @@ static void picolcd_fb_do_cleanup(struct work_struct *data)
        } while (item);
 }
 
-DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
+static DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
 
 static int picolcd_fb_open(struct fb_info *info, int u)
 {
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
deleted file mode 100644 (file)
index 87a54df..0000000
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- *  HID driver for Quanta Optical Touch dual-touch panels
- *
- *  Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
- *
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
-MODULE_DESCRIPTION("Quanta dual-touch panel");
-MODULE_LICENSE("GPL");
-
-#include "hid-ids.h"
-
-struct quanta_data {
-       __u16 x, y;
-       __u8 id;
-       bool valid;             /* valid finger data, or just placeholder? */
-       bool first;             /* is this the first finger in this frame? */
-       bool activity_now;      /* at least one active finger in this frame? */
-       bool activity;          /* at least one active finger previously? */
-};
-
-static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
-               struct hid_field *field, struct hid_usage *usage,
-               unsigned long **bit, int *max)
-{
-       switch (usage->hid & HID_USAGE_PAGE) {
-
-       case HID_UP_GENDESK:
-               switch (usage->hid) {
-               case HID_GD_X:
-                       hid_map_usage(hi, usage, bit, max,
-                                       EV_ABS, ABS_MT_POSITION_X);
-                       /* touchscreen emulation */
-                       input_set_abs_params(hi->input, ABS_X,
-                                               field->logical_minimum,
-                                               field->logical_maximum, 0, 0);
-                       return 1;
-               case HID_GD_Y:
-                       hid_map_usage(hi, usage, bit, max,
-                                       EV_ABS, ABS_MT_POSITION_Y);
-                       /* touchscreen emulation */
-                       input_set_abs_params(hi->input, ABS_Y,
-                                               field->logical_minimum,
-                                               field->logical_maximum, 0, 0);
-                       return 1;
-               }
-               return 0;
-
-       case HID_UP_DIGITIZER:
-               switch (usage->hid) {
-               case HID_DG_CONFIDENCE:
-               case HID_DG_TIPSWITCH:
-               case HID_DG_INPUTMODE:
-               case HID_DG_DEVICEINDEX:
-               case HID_DG_CONTACTCOUNT:
-               case HID_DG_CONTACTMAX:
-               case HID_DG_TIPPRESSURE:
-               case HID_DG_WIDTH:
-               case HID_DG_HEIGHT:
-                       return -1;
-               case HID_DG_INRANGE:
-                       /* touchscreen emulation */
-                       hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
-                       return 1;
-               case HID_DG_CONTACTID:
-                       hid_map_usage(hi, usage, bit, max,
-                                       EV_ABS, ABS_MT_TRACKING_ID);
-                       return 1;
-               }
-               return 0;
-
-       case 0xff000000:
-               /* ignore vendor-specific features */
-               return -1;
-       }
-
-       return 0;
-}
-
-static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
-               struct hid_field *field, struct hid_usage *usage,
-               unsigned long **bit, int *max)
-{
-       if (usage->type == EV_KEY || usage->type == EV_ABS)
-               clear_bit(usage->code, *bit);
-
-       return 0;
-}
-
-/*
- * this function is called when a whole finger has been parsed,
- * so that it can decide what to send to the input layer.
- */
-static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
-{
-       
-       td->first = !td->first; /* touchscreen emulation */
-
-       if (!td->valid) {
-               /*
-                * touchscreen emulation: if no finger in this frame is valid
-                * and there previously was finger activity, this is a release
-                */ 
-               if (!td->first && !td->activity_now && td->activity) {
-                       input_event(input, EV_KEY, BTN_TOUCH, 0);
-                       td->activity = false;
-               }
-               return;
-       }
-
-       input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
-       input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
-       input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
-
-       input_mt_sync(input);
-       td->valid = false;
-
-       /* touchscreen emulation: if first active finger in this frame... */
-       if (!td->activity_now) {
-               /* if there was no previous activity, emit touch event */
-               if (!td->activity) {
-                       input_event(input, EV_KEY, BTN_TOUCH, 1);
-                       td->activity = true;
-               }
-               td->activity_now = true;
-               /* and in any case this is our preferred finger */
-               input_event(input, EV_ABS, ABS_X, td->x);
-               input_event(input, EV_ABS, ABS_Y, td->y);
-       }
-}
-
-
-static int quanta_event(struct hid_device *hid, struct hid_field *field,
-                               struct hid_usage *usage, __s32 value)
-{
-       struct quanta_data *td = hid_get_drvdata(hid);
-
-       if (hid->claimed & HID_CLAIMED_INPUT) {
-               struct input_dev *input = field->hidinput->input;
-
-               switch (usage->hid) {
-               case HID_DG_INRANGE:
-                       td->valid = !!value;
-                       break;
-               case HID_GD_X:
-                       td->x = value;
-                       break;
-               case HID_GD_Y:
-                       td->y = value;
-                       quanta_filter_event(td, input);
-                       break;
-               case HID_DG_CONTACTID:
-                       td->id = value;
-                       break;
-               case HID_DG_CONTACTCOUNT:
-                       /* touch emulation: this is the last field in a frame */
-                       td->first = false;
-                       td->activity_now = false;
-                       break;
-               case HID_DG_CONFIDENCE:
-               case HID_DG_TIPSWITCH:
-                       /* avoid interference from generic hidinput handling */
-                       break;
-
-               default:
-                       /* fallback to the generic hidinput handling */
-                       return 0;
-               }
-       }
-
-       /* we have handled the hidinput part, now remains hiddev */
-       if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
-               hid->hiddev_hid_event(hid, field, usage, value);
-
-       return 1;
-}
-
-static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
-{
-       int ret;
-       struct quanta_data *td;
-
-       td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
-       if (!td) {
-               hid_err(hdev, "cannot allocate Quanta Touch data\n");
-               return -ENOMEM;
-       }
-       td->valid = false;
-       td->activity = false;
-       td->activity_now = false;
-       td->first = false;
-       hid_set_drvdata(hdev, td);
-
-       ret = hid_parse(hdev);
-       if (!ret)
-               ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
-
-       if (ret)
-               kfree(td);
-
-       return ret;
-}
-
-static void quanta_remove(struct hid_device *hdev)
-{
-       hid_hw_stop(hdev);
-       kfree(hid_get_drvdata(hdev));
-       hid_set_drvdata(hdev, NULL);
-}
-
-static const struct hid_device_id quanta_devices[] = {
-       { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
-                       USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
-                       USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
-       { }
-};
-MODULE_DEVICE_TABLE(hid, quanta_devices);
-
-static const struct hid_usage_id quanta_grabbed_usages[] = {
-       { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
-       { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
-static struct hid_driver quanta_driver = {
-       .name = "quanta-touch",
-       .id_table = quanta_devices,
-       .probe = quanta_probe,
-       .remove = quanta_remove,
-       .input_mapping = quanta_input_mapping,
-       .input_mapped = quanta_input_mapped,
-       .usage_table = quanta_grabbed_usages,
-       .event = quanta_event,
-};
-
-static int __init quanta_init(void)
-{
-       return hid_register_driver(&quanta_driver);
-}
-
-static void __exit quanta_exit(void)
-{
-       hid_unregister_driver(&quanta_driver);
-}
-
-module_init(quanta_init);
-module_exit(quanta_exit);
-
index b07e7f96a3580e0168bce0a89e86b8091c94cd7a..a6d93992c75a4ee8d593729f867ca582975e0629 100644 (file)
@@ -49,12 +49,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
        char *buf;
        int len;
 
-       buf = kmalloc(size, GFP_KERNEL);
+       buf = kmemdup(data, size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
-       memcpy(buf, data, size);
-
        len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
                        HID_REQ_SET_REPORT,
                        USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
new file mode 100644 (file)
index 0000000..0e4a0ab
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * Roccat Isku driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Isku is a gamer keyboard with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-isku.h"
+
+static struct class *isku_class;
+
+static void isku_profile_activated(struct isku_device *isku, uint new_profile)
+{
+       isku->actual_profile = new_profile;
+}
+
+static int isku_receive(struct usb_device *usb_dev, uint command,
+               void *buf, uint size)
+{
+       return roccat_common_receive(usb_dev, command, buf, size);
+}
+
+static int isku_receive_control_status(struct usb_device *usb_dev)
+{
+       int retval;
+       struct isku_control control;
+
+       do {
+               msleep(50);
+               retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
+                               &control, sizeof(struct isku_control));
+
+               if (retval)
+                       return retval;
+
+               switch (control.value) {
+               case ISKU_CONTROL_VALUE_STATUS_OK:
+                       return 0;
+               case ISKU_CONTROL_VALUE_STATUS_WAIT:
+                       continue;
+               case ISKU_CONTROL_VALUE_STATUS_INVALID:
+               /* seems to be critical - replug necessary */
+               case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
+                       return -EINVAL;
+               default:
+                       hid_err(usb_dev, "isku_receive_control_status: "
+                                       "unknown response value 0x%x\n",
+                                       control.value);
+                       return -EINVAL;
+               }
+
+       } while (1);
+}
+
+static int isku_send(struct usb_device *usb_dev, uint command,
+               void const *buf, uint size)
+{
+       int retval;
+
+       retval = roccat_common_send(usb_dev, command, buf, size);
+       if (retval)
+               return retval;
+
+       return isku_receive_control_status(usb_dev);
+}
+
+static int isku_get_actual_profile(struct usb_device *usb_dev)
+{
+       struct isku_actual_profile buf;
+       int retval;
+
+       retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE,
+                       &buf, sizeof(struct isku_actual_profile));
+       return retval ? retval : buf.actual_profile;
+}
+
+static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
+{
+       struct isku_actual_profile buf;
+
+       buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
+       buf.size = sizeof(struct isku_actual_profile);
+       buf.actual_profile = new_profile;
+       return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+                       sizeof(struct isku_actual_profile));
+}
+
+static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct isku_device *isku =
+                       hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+       return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
+}
+
+static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
+               struct device_attribute *attr, char const *buf, size_t size)
+{
+       struct isku_device *isku;
+       struct usb_device *usb_dev;
+       unsigned long profile;
+       int retval;
+       struct isku_roccat_report roccat_report;
+
+       dev = dev->parent->parent;
+       isku = hid_get_drvdata(dev_get_drvdata(dev));
+       usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+       retval = strict_strtoul(buf, 10, &profile);
+       if (retval)
+               return retval;
+
+       if (profile > 4)
+               return -EINVAL;
+
+       mutex_lock(&isku->isku_lock);
+
+       retval = isku_set_actual_profile(usb_dev, profile);
+       if (retval) {
+               mutex_unlock(&isku->isku_lock);
+               return retval;
+       }
+
+       isku_profile_activated(isku, profile);
+
+       roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
+       roccat_report.data1 = profile + 1;
+       roccat_report.data2 = 0;
+       roccat_report.profile = profile + 1;
+       roccat_report_event(isku->chrdev_minor, (uint8_t const *)&roccat_report);
+
+       mutex_unlock(&isku->isku_lock);
+
+       return size;
+}
+
+static struct device_attribute isku_attributes[] = {
+       __ATTR(actual_profile, 0660,
+                       isku_sysfs_show_actual_profile,
+                       isku_sysfs_set_actual_profile),
+       __ATTR_NULL
+};
+
+static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
+               char *buf, loff_t off, size_t count,
+               size_t real_size, uint command)
+{
+       struct device *dev =
+                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+       struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+       int retval;
+
+       if (off >= real_size)
+               return 0;
+
+       if (off != 0 || count != real_size)
+               return -EINVAL;
+
+       mutex_lock(&isku->isku_lock);
+       retval = isku_receive(usb_dev, command, buf, real_size);
+       mutex_unlock(&isku->isku_lock);
+
+       return retval ? retval : real_size;
+}
+
+static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
+               void const *buf, loff_t off, size_t count,
+               size_t real_size, uint command)
+{
+       struct device *dev =
+                       container_of(kobj, struct device, kobj)->parent->parent;
+       struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+       struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+       int retval;
+
+       if (off != 0 || count != real_size)
+               return -EINVAL;
+
+       mutex_lock(&isku->isku_lock);
+       retval = isku_send(usb_dev, command, (void *)buf, real_size);
+       mutex_unlock(&isku->isku_lock);
+
+       return retval ? retval : real_size;
+}
+
+#define ISKU_SYSFS_W(thingy, THINGY) \
+static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
+               struct bin_attribute *attr, char *buf, \
+               loff_t off, size_t count) \
+{ \
+       return isku_sysfs_write(fp, kobj, buf, off, count, \
+                       sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_R(thingy, THINGY) \
+static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
+               struct bin_attribute *attr, char *buf, \
+               loff_t off, size_t count) \
+{ \
+       return isku_sysfs_read(fp, kobj, buf, off, count, \
+                       sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_RW(thingy, THINGY) \
+ISKU_SYSFS_R(thingy, THINGY) \
+ISKU_SYSFS_W(thingy, THINGY)
+
+#define ISKU_BIN_ATTR_RW(thingy) \
+{ \
+       .attr = { .name = #thingy, .mode = 0660 }, \
+       .size = sizeof(struct isku_ ## thingy), \
+       .read = isku_sysfs_read_ ## thingy, \
+       .write = isku_sysfs_write_ ## thingy \
+}
+
+#define ISKU_BIN_ATTR_R(thingy) \
+{ \
+       .attr = { .name = #thingy, .mode = 0440 }, \
+       .size = sizeof(struct isku_ ## thingy), \
+       .read = isku_sysfs_read_ ## thingy, \
+}
+
+#define ISKU_BIN_ATTR_W(thingy) \
+{ \
+       .attr = { .name = #thingy, .mode = 0220 }, \
+       .size = sizeof(struct isku_ ## thingy), \
+       .write = isku_sysfs_write_ ## thingy \
+}
+
+ISKU_SYSFS_RW(macro, MACRO)
+ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
+ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
+ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
+ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
+ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
+ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
+ISKU_SYSFS_RW(light, LIGHT)
+ISKU_SYSFS_RW(key_mask, KEY_MASK)
+ISKU_SYSFS_RW(last_set, LAST_SET)
+ISKU_SYSFS_W(talk, TALK)
+ISKU_SYSFS_R(info, INFO)
+ISKU_SYSFS_W(control, CONTROL)
+
+static struct bin_attribute isku_bin_attributes[] = {
+       ISKU_BIN_ATTR_RW(macro),
+       ISKU_BIN_ATTR_RW(keys_function),
+       ISKU_BIN_ATTR_RW(keys_easyzone),
+       ISKU_BIN_ATTR_RW(keys_media),
+       ISKU_BIN_ATTR_RW(keys_thumbster),
+       ISKU_BIN_ATTR_RW(keys_macro),
+       ISKU_BIN_ATTR_RW(keys_capslock),
+       ISKU_BIN_ATTR_RW(light),
+       ISKU_BIN_ATTR_RW(key_mask),
+       ISKU_BIN_ATTR_RW(last_set),
+       ISKU_BIN_ATTR_W(talk),
+       ISKU_BIN_ATTR_R(info),
+       ISKU_BIN_ATTR_W(control),
+       __ATTR_NULL
+};
+
+static int isku_init_isku_device_struct(struct usb_device *usb_dev,
+               struct isku_device *isku)
+{
+       int retval;
+
+       mutex_init(&isku->isku_lock);
+
+       retval = isku_get_actual_profile(usb_dev);
+       if (retval < 0)
+               return retval;
+       isku_profile_activated(isku, retval);
+
+       return 0;
+}
+
+static int isku_init_specials(struct hid_device *hdev)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+       struct usb_device *usb_dev = interface_to_usbdev(intf);
+       struct isku_device *isku;
+       int retval;
+
+       if (intf->cur_altsetting->desc.bInterfaceProtocol
+                       != ISKU_USB_INTERFACE_PROTOCOL) {
+               hid_set_drvdata(hdev, NULL);
+               return 0;
+       }
+
+       isku = kzalloc(sizeof(*isku), GFP_KERNEL);
+       if (!isku) {
+               hid_err(hdev, "can't alloc device descriptor\n");
+               return -ENOMEM;
+       }
+       hid_set_drvdata(hdev, isku);
+
+       retval = isku_init_isku_device_struct(usb_dev, isku);
+       if (retval) {
+               hid_err(hdev, "couldn't init struct isku_device\n");
+               goto exit_free;
+       }
+
+       retval = roccat_connect(isku_class, hdev,
+                       sizeof(struct isku_roccat_report));
+       if (retval < 0) {
+               hid_err(hdev, "couldn't init char dev\n");
+       } else {
+               isku->chrdev_minor = retval;
+               isku->roccat_claimed = 1;
+       }
+
+       return 0;
+exit_free:
+       kfree(isku);
+       return retval;
+}
+
+static void isku_remove_specials(struct hid_device *hdev)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+       struct isku_device *isku;
+
+       if (intf->cur_altsetting->desc.bInterfaceProtocol
+                       != ISKU_USB_INTERFACE_PROTOCOL)
+               return;
+
+       isku = hid_get_drvdata(hdev);
+       if (isku->roccat_claimed)
+               roccat_disconnect(isku->chrdev_minor);
+       kfree(isku);
+}
+
+static int isku_probe(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       int retval;
+
+       retval = hid_parse(hdev);
+       if (retval) {
+               hid_err(hdev, "parse failed\n");
+               goto exit;
+       }
+
+       retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+       if (retval) {
+               hid_err(hdev, "hw start failed\n");
+               goto exit;
+       }
+
+       retval = isku_init_specials(hdev);
+       if (retval) {
+               hid_err(hdev, "couldn't install keyboard\n");
+               goto exit_stop;
+       }
+
+       return 0;
+
+exit_stop:
+       hid_hw_stop(hdev);
+exit:
+       return retval;
+}
+
+static void isku_remove(struct hid_device *hdev)
+{
+       isku_remove_specials(hdev);
+       hid_hw_stop(hdev);
+}
+
+static void isku_keep_values_up_to_date(struct isku_device *isku,
+               u8 const *data)
+{
+       struct isku_report_button const *button_report;
+
+       switch (data[0]) {
+       case ISKU_REPORT_NUMBER_BUTTON:
+               button_report = (struct isku_report_button const *)data;
+               switch (button_report->event) {
+               case ISKU_REPORT_BUTTON_EVENT_PROFILE:
+                       isku_profile_activated(isku, button_report->data1 - 1);
+                       break;
+               }
+               break;
+       }
+}
+
+static void isku_report_to_chrdev(struct isku_device const *isku,
+               u8 const *data)
+{
+       struct isku_roccat_report roccat_report;
+       struct isku_report_button const *button_report;
+
+       if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
+               return;
+
+       button_report = (struct isku_report_button const *)data;
+
+       roccat_report.event = button_report->event;
+       roccat_report.data1 = button_report->data1;
+       roccat_report.data2 = button_report->data2;
+       roccat_report.profile = isku->actual_profile + 1;
+       roccat_report_event(isku->chrdev_minor,
+                       (uint8_t const *)&roccat_report);
+}
+
+static int isku_raw_event(struct hid_device *hdev,
+               struct hid_report *report, u8 *data, int size)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+       struct isku_device *isku = hid_get_drvdata(hdev);
+
+       if (intf->cur_altsetting->desc.bInterfaceProtocol
+                       != ISKU_USB_INTERFACE_PROTOCOL)
+               return 0;
+
+       if (isku == NULL)
+               return 0;
+
+       isku_keep_values_up_to_date(isku, data);
+
+       if (isku->roccat_claimed)
+               isku_report_to_chrdev(isku, data);
+
+       return 0;
+}
+
+static const struct hid_device_id isku_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+       { }
+};
+
+MODULE_DEVICE_TABLE(hid, isku_devices);
+
+static struct hid_driver isku_driver = {
+               .name = "isku",
+               .id_table = isku_devices,
+               .probe = isku_probe,
+               .remove = isku_remove,
+               .raw_event = isku_raw_event
+};
+
+static int __init isku_init(void)
+{
+       int retval;
+       isku_class = class_create(THIS_MODULE, "isku");
+       if (IS_ERR(isku_class))
+               return PTR_ERR(isku_class);
+       isku_class->dev_attrs = isku_attributes;
+       isku_class->dev_bin_attrs = isku_bin_attributes;
+
+       retval = hid_register_driver(&isku_driver);
+       if (retval)
+               class_destroy(isku_class);
+       return retval;
+}
+
+static void __exit isku_exit(void)
+{
+       hid_unregister_driver(&isku_driver);
+       class_destroy(isku_class);
+}
+
+module_init(isku_init);
+module_exit(isku_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Isku driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
new file mode 100644 (file)
index 0000000..075f6ef
--- /dev/null
@@ -0,0 +1,147 @@
+#ifndef __HID_ROCCAT_ISKU_H
+#define __HID_ROCCAT_ISKU_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+       ISKU_PROFILE_NUM = 5,
+       ISKU_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct isku_control {
+       uint8_t command; /* ISKU_COMMAND_CONTROL */
+       uint8_t value;
+       uint8_t request;
+} __packed;
+
+enum isku_control_values {
+       ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
+       ISKU_CONTROL_VALUE_STATUS_OK = 1,
+       ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
+       ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
+};
+
+struct isku_actual_profile {
+       uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
+       uint8_t size; /* always 3 */
+       uint8_t actual_profile;
+} __packed;
+
+struct isku_key_mask {
+       uint8_t command; /* ISKU_COMMAND_KEY_MASK */
+       uint8_t size; /* 6 */
+       uint8_t profile_number; /* 0-4 */
+       uint8_t mask;
+       uint16_t checksum;
+} __packed;
+
+struct isku_keys_function {
+       uint8_t data[0x29];
+} __packed;
+
+struct isku_keys_easyzone {
+       uint8_t data[0x41];
+} __packed;
+
+struct isku_keys_media {
+       uint8_t data[0x1d];
+} __packed;
+
+struct isku_keys_thumbster {
+       uint8_t data[0x17];
+} __packed;
+
+struct isku_keys_macro {
+       uint8_t data[0x23];
+} __packed;
+
+struct isku_keys_capslock {
+       uint8_t data[0x6];
+} __packed;
+
+struct isku_macro {
+       uint8_t data[0x823];
+} __packed;
+
+struct isku_light {
+       uint8_t data[0xa];
+} __packed;
+
+struct isku_info {
+       uint8_t data[2];
+       uint8_t firmware_version;
+       uint8_t unknown[3];
+} __packed;
+
+struct isku_talk {
+       uint8_t data[0x10];
+} __packed;
+
+struct isku_last_set {
+       uint8_t data[0x14];
+} __packed;
+
+enum isku_commands {
+       ISKU_COMMAND_CONTROL = 0x4,
+       ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
+       ISKU_COMMAND_KEY_MASK = 0x7,
+       ISKU_COMMAND_KEYS_FUNCTION = 0x8,
+       ISKU_COMMAND_KEYS_EASYZONE = 0x9,
+       ISKU_COMMAND_KEYS_MEDIA = 0xa,
+       ISKU_COMMAND_KEYS_THUMBSTER = 0xb,
+       ISKU_COMMAND_KEYS_MACRO = 0xd,
+       ISKU_COMMAND_MACRO = 0xe,
+       ISKU_COMMAND_INFO = 0xf,
+       ISKU_COMMAND_LIGHT = 0x10,
+       ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
+       ISKU_COMMAND_LAST_SET = 0x14,
+       ISKU_COMMAND_15 = 0x15,
+       ISKU_COMMAND_TALK = 0x16,
+       ISKU_COMMAND_FIRMWARE_WRITE = 0x1b,
+       ISKU_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
+struct isku_report_button {
+       uint8_t number; /* ISKU_REPORT_NUMBER_BUTTON */
+       uint8_t zero;
+       uint8_t event;
+       uint8_t data1;
+       uint8_t data2;
+};
+
+enum isku_report_numbers {
+       ISKU_REPORT_NUMBER_BUTTON = 3,
+};
+
+enum isku_report_button_events {
+       ISKU_REPORT_BUTTON_EVENT_PROFILE = 0x2,
+};
+
+struct isku_roccat_report {
+       uint8_t event;
+       uint8_t data1;
+       uint8_t data2;
+       uint8_t profile;
+} __packed;
+
+struct isku_device {
+       int roccat_claimed;
+       int chrdev_minor;
+
+       struct mutex isku_lock;
+
+       int actual_profile;
+};
+
+#endif
index e2072afb34bbb7dce51c9bdb3b44f5b2867e9bd3..40090d602158bf06e03297edb40aa0c00edcd7f6 100644 (file)
@@ -78,12 +78,10 @@ static int kone_send(struct usb_device *usb_dev, uint usb_command,
        char *buf;
        int len;
 
-       buf = kmalloc(size, GFP_KERNEL);
+       buf = kmemdup(data, size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
-       memcpy(buf, data, size);
-
        len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
                        HID_REQ_SET_REPORT,
                        USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
index 17bb88f782b69b5cd0a44df7927742f412c6d1d0..f2183486a9b60882025d0eacc00070fa97253ce7 100644 (file)
@@ -9,6 +9,7 @@
  *  Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
  *  Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
  *  Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
+ *  Copyright (c) 2011 PrzemysÅ‚aw Firszt <przemo@firszt.eu>
  */
 
 /*
@@ -33,6 +34,7 @@
 struct wacom_data {
        __u16 tool;
        unsigned char butstate;
+       __u8 features;
        unsigned char high_speed;
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        int battery_capacity;
@@ -107,6 +109,19 @@ static int wacom_ac_get_property(struct power_supply *psy,
 }
 #endif
 
+static void wacom_set_features(struct hid_device *hdev)
+{
+       int ret;
+       __u8 rep_data[2];
+
+       /*set high speed, tablet mode*/
+       rep_data[0] = 0x03;
+       rep_data[1] = 0x20;
+       ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+                               HID_FEATURE_REPORT);
+       return;
+}
+
 static void wacom_poke(struct hid_device *hdev, u8 speed)
 {
        struct wacom_data *wdata = hid_get_drvdata(hdev);
@@ -177,26 +192,13 @@ static ssize_t wacom_store_speed(struct device *dev,
 static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
                wacom_show_speed, wacom_store_speed);
 
-static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
-               u8 *raw_data, int size)
+static int wacom_gr_parse_report(struct hid_device *hdev,
+                       struct wacom_data *wdata,
+                       struct input_dev *input, unsigned char *data)
 {
-       struct wacom_data *wdata = hid_get_drvdata(hdev);
-       struct hid_input *hidinput;
-       struct input_dev *input;
-       unsigned char *data = (unsigned char *) raw_data;
        int tool, x, y, rw;
 
-       if (!(hdev->claimed & HID_CLAIMED_INPUT))
-               return 0;
-
        tool = 0;
-       hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
-       input = hidinput->input;
-
-       /* Check if this is a tablet report */
-       if (data[0] != 0x03)
-               return 0;
-
        /* Get X & Y positions */
        x = le16_to_cpu(*(__le16 *) &data[2]);
        y = le16_to_cpu(*(__le16 *) &data[4]);
@@ -304,6 +306,121 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
        return 1;
 }
 
+static void wacom_i4_parse_pen_report(struct wacom_data *wdata,
+                       struct input_dev *input, unsigned char *data)
+{
+       __u16 x, y, pressure;
+       __u32 id;
+
+       switch (data[1]) {
+       case 0x80: /* Out of proximity report */
+               wdata->tool = 0;
+               input_report_key(input, BTN_TOUCH, 0);
+               input_report_abs(input, ABS_PRESSURE, 0);
+               input_report_key(input, wdata->tool, 0);
+               input_sync(input);
+               break;
+       case 0xC2: /* Tool report */
+               id = ((data[2] << 4) | (data[3] >> 4) |
+                       ((data[7] & 0x0f) << 20) |
+                       ((data[8] & 0xf0) << 12)) & 0xfffff;
+
+               switch (id) {
+               case 0x802:
+                       wdata->tool = BTN_TOOL_PEN;
+                       break;
+               case 0x80A:
+                       wdata->tool = BTN_TOOL_RUBBER;
+                       break;
+               }
+               break;
+       default: /* Position/pressure report */
+               x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1);
+               y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01);
+               pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5)
+                       | (data[1] & 0x01);
+
+               input_report_key(input, BTN_TOUCH, pressure > 1);
+
+               input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+               input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
+               input_report_key(input, wdata->tool, 1);
+               input_report_abs(input, ABS_X, x);
+               input_report_abs(input, ABS_Y, y);
+               input_report_abs(input, ABS_PRESSURE, pressure);
+               input_sync(input);
+               break;
+       }
+
+       return;
+}
+
+static void wacom_i4_parse_report(struct hid_device *hdev,
+                       struct wacom_data *wdata,
+                       struct input_dev *input, unsigned char *data)
+{
+       switch (data[0]) {
+       case 0x00: /* Empty report */
+               break;
+       case 0x02: /* Pen report */
+               wacom_i4_parse_pen_report(wdata, input, data);
+               break;
+       case 0x03: /* Features Report */
+               wdata->features = data[2];
+               break;
+       case 0x0C: /* Button report */
+               break;
+       default:
+               hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]);
+               break;
+       }
+}
+
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+               u8 *raw_data, int size)
+{
+       struct wacom_data *wdata = hid_get_drvdata(hdev);
+       struct hid_input *hidinput;
+       struct input_dev *input;
+       unsigned char *data = (unsigned char *) raw_data;
+       int i;
+
+       if (!(hdev->claimed & HID_CLAIMED_INPUT))
+               return 0;
+
+       hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+       input = hidinput->input;
+
+       /* Check if this is a tablet report */
+       if (data[0] != 0x03)
+               return 0;
+
+       switch (hdev->product) {
+       case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+               return wacom_gr_parse_report(hdev, wdata, input, data);
+               break;
+       case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+               i = 1;
+
+               switch (data[0]) {
+               case 0x04:
+                       wacom_i4_parse_report(hdev, wdata, input, data + i);
+                       i += 10;
+                       /* fall through */
+               case 0x03:
+                       wacom_i4_parse_report(hdev, wdata, input, data + i);
+                       i += 10;
+                       wacom_i4_parse_report(hdev, wdata, input, data + i);
+                       break;
+               default:
+                       hid_err(hdev, "Unknown report: %d,%d size:%d\n",
+                                       data[0], data[1], size);
+                       return 0;
+               }
+       }
+       return 1;
+}
+
 static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
        struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
                                                                int *max)
@@ -338,10 +455,19 @@ static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
        __set_bit(BTN_TOOL_RUBBER, input->keybit);
        __set_bit(BTN_TOOL_MOUSE, input->keybit);
 
-       input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
-       input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
-       input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
-       input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+       switch (hdev->product) {
+       case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+               input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+               input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+               input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+               input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+               break;
+       case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+               input_set_abs_params(input, ABS_X, 0, 40640, 4, 0);
+               input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0);
+               input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0);
+               break;
+       }
 
        return 0;
 }
@@ -378,8 +504,16 @@ static int wacom_probe(struct hid_device *hdev,
                hid_warn(hdev,
                         "can't create sysfs speed attribute err: %d\n", ret);
 
-       /* Set Wacom mode 2 with high reporting speed */
-       wacom_poke(hdev, 1);
+       switch (hdev->product) {
+       case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+               /* Set Wacom mode 2 with high reporting speed */
+               wacom_poke(hdev, 1);
+               break;
+       case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+               wdata->features = 0;
+               wacom_set_features(hdev);
+               break;
+       }
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        wdata->battery.properties = wacom_battery_props;
@@ -441,6 +575,7 @@ static void wacom_remove(struct hid_device *hdev)
 
 static const struct hid_device_id wacom_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
 
        { }
 };
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
new file mode 100644 (file)
index 0000000..61881b3
--- /dev/null
@@ -0,0 +1,1316 @@
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+#include "hid-ids.h"
+#include "hid-wiimote.h"
+
+enum wiiproto_keys {
+       WIIPROTO_KEY_LEFT,
+       WIIPROTO_KEY_RIGHT,
+       WIIPROTO_KEY_UP,
+       WIIPROTO_KEY_DOWN,
+       WIIPROTO_KEY_PLUS,
+       WIIPROTO_KEY_MINUS,
+       WIIPROTO_KEY_ONE,
+       WIIPROTO_KEY_TWO,
+       WIIPROTO_KEY_A,
+       WIIPROTO_KEY_B,
+       WIIPROTO_KEY_HOME,
+       WIIPROTO_KEY_COUNT
+};
+
+static __u16 wiiproto_keymap[] = {
+       KEY_LEFT,       /* WIIPROTO_KEY_LEFT */
+       KEY_RIGHT,      /* WIIPROTO_KEY_RIGHT */
+       KEY_UP,         /* WIIPROTO_KEY_UP */
+       KEY_DOWN,       /* WIIPROTO_KEY_DOWN */
+       KEY_NEXT,       /* WIIPROTO_KEY_PLUS */
+       KEY_PREVIOUS,   /* WIIPROTO_KEY_MINUS */
+       BTN_1,          /* WIIPROTO_KEY_ONE */
+       BTN_2,          /* WIIPROTO_KEY_TWO */
+       BTN_A,          /* WIIPROTO_KEY_A */
+       BTN_B,          /* WIIPROTO_KEY_B */
+       BTN_MODE,       /* WIIPROTO_KEY_HOME */
+};
+
+static enum power_supply_property wiimote_battery_props[] = {
+       POWER_SUPPLY_PROP_CAPACITY
+};
+
+static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
+                                                               size_t count)
+{
+       __u8 *buf;
+       ssize_t ret;
+
+       if (!hdev->hid_output_raw_report)
+               return -ENODEV;
+
+       buf = kmemdup(buffer, count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
+
+       kfree(buf);
+       return ret;
+}
+
+static void wiimote_worker(struct work_struct *work)
+{
+       struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+                                                                       worker);
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdata->qlock, flags);
+
+       while (wdata->head != wdata->tail) {
+               spin_unlock_irqrestore(&wdata->qlock, flags);
+               wiimote_hid_send(wdata->hdev, wdata->outq[wdata->tail].data,
+                                               wdata->outq[wdata->tail].size);
+               spin_lock_irqsave(&wdata->qlock, flags);
+
+               wdata->tail = (wdata->tail + 1) % WIIMOTE_BUFSIZE;
+       }
+
+       spin_unlock_irqrestore(&wdata->qlock, flags);
+}
+
+static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
+                                                               size_t count)
+{
+       unsigned long flags;
+       __u8 newhead;
+
+       if (count > HID_MAX_BUFFER_SIZE) {
+               hid_warn(wdata->hdev, "Sending too large output report\n");
+               return;
+       }
+
+       /*
+        * Copy new request into our output queue and check whether the
+        * queue is full. If it is full, discard this request.
+        * If it is empty we need to start a new worker that will
+        * send out the buffer to the hid device.
+        * If the queue is not empty, then there must be a worker
+        * that is currently sending out our buffer and this worker
+        * will reschedule itself until the queue is empty.
+        */
+
+       spin_lock_irqsave(&wdata->qlock, flags);
+
+       memcpy(wdata->outq[wdata->head].data, buffer, count);
+       wdata->outq[wdata->head].size = count;
+       newhead = (wdata->head + 1) % WIIMOTE_BUFSIZE;
+
+       if (wdata->head == wdata->tail) {
+               wdata->head = newhead;
+               schedule_work(&wdata->worker);
+       } else if (newhead != wdata->tail) {
+               wdata->head = newhead;
+       } else {
+               hid_warn(wdata->hdev, "Output queue is full");
+       }
+
+       spin_unlock_irqrestore(&wdata->qlock, flags);
+}
+
+/*
+ * This sets the rumble bit on the given output report if rumble is
+ * currently enabled.
+ * \cmd1 must point to the second byte in the output report => &cmd[1]
+ * This must be called on nearly every output report before passing it
+ * into the output queue!
+ */
+static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
+{
+       if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
+               *cmd1 |= 0x01;
+}
+
+static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
+{
+       __u8 cmd[2];
+
+       rumble = !!rumble;
+       if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
+               return;
+
+       if (rumble)
+               wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
+       else
+               wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
+
+       cmd[0] = WIIPROTO_REQ_RUMBLE;
+       cmd[1] = 0;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
+{
+       __u8 cmd[2];
+
+       leds &= WIIPROTO_FLAGS_LEDS;
+       if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds)
+               return;
+       wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds;
+
+       cmd[0] = WIIPROTO_REQ_LED;
+       cmd[1] = 0;
+
+       if (leds & WIIPROTO_FLAG_LED1)
+               cmd[1] |= 0x10;
+       if (leds & WIIPROTO_FLAG_LED2)
+               cmd[1] |= 0x20;
+       if (leds & WIIPROTO_FLAG_LED3)
+               cmd[1] |= 0x40;
+       if (leds & WIIPROTO_FLAG_LED4)
+               cmd[1] |= 0x80;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+/*
+ * Check what peripherals of the wiimote are currently
+ * active and select a proper DRM that supports all of
+ * the requested data inputs.
+ */
+static __u8 select_drm(struct wiimote_data *wdata)
+{
+       __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+       bool ext = wiiext_active(wdata);
+
+       if (ir == WIIPROTO_FLAG_IR_BASIC) {
+               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+                       return WIIPROTO_REQ_DRM_KAIE;
+               else
+                       return WIIPROTO_REQ_DRM_KIE;
+       } else if (ir == WIIPROTO_FLAG_IR_EXT) {
+               return WIIPROTO_REQ_DRM_KAI;
+       } else if (ir == WIIPROTO_FLAG_IR_FULL) {
+               return WIIPROTO_REQ_DRM_SKAI1;
+       } else {
+               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
+                       if (ext)
+                               return WIIPROTO_REQ_DRM_KAE;
+                       else
+                               return WIIPROTO_REQ_DRM_KA;
+               } else {
+                       if (ext)
+                               return WIIPROTO_REQ_DRM_KE;
+                       else
+                               return WIIPROTO_REQ_DRM_K;
+               }
+       }
+}
+
+void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
+{
+       __u8 cmd[3];
+
+       if (drm == WIIPROTO_REQ_NULL)
+               drm = select_drm(wdata);
+
+       cmd[0] = WIIPROTO_REQ_DRM;
+       cmd[1] = 0;
+       cmd[2] = drm;
+
+       wdata->state.drm = drm;
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_status(struct wiimote_data *wdata)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_SREQ;
+       cmd[1] = 0;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
+{
+       accel = !!accel;
+       if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+               return;
+
+       if (accel)
+               wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
+       else
+               wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
+
+       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_IR1;
+       cmd[1] = flags;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
+{
+       __u8 cmd[2];
+
+       cmd[0] = WIIPROTO_REQ_IR2;
+       cmd[1] = flags;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiiproto_req_wreg(wdata, os, buf, sz) \
+                       wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
+
+#define wiiproto_req_weeprom(wdata, os, buf, sz) \
+                       wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
+
+static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
+                               __u32 offset, const __u8 *buf, __u8 size)
+{
+       __u8 cmd[22];
+
+       if (size > 16 || size == 0) {
+               hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
+               return;
+       }
+
+       memset(cmd, 0, sizeof(cmd));
+       cmd[0] = WIIPROTO_REQ_WMEM;
+       cmd[2] = (offset >> 16) & 0xff;
+       cmd[3] = (offset >> 8) & 0xff;
+       cmd[4] = offset & 0xff;
+       cmd[5] = size;
+       memcpy(&cmd[6], buf, size);
+
+       if (!eeprom)
+               cmd[1] |= 0x04;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset,
+                                                               __u16 size)
+{
+       __u8 cmd[7];
+
+       if (size == 0) {
+               hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size);
+               return;
+       }
+
+       cmd[0] = WIIPROTO_REQ_RMEM;
+       cmd[1] = 0;
+       cmd[2] = (offset >> 16) & 0xff;
+       cmd[3] = (offset >> 8) & 0xff;
+       cmd[4] = offset & 0xff;
+       cmd[5] = (size >> 8) & 0xff;
+       cmd[6] = size & 0xff;
+
+       if (!eeprom)
+               cmd[1] |= 0x04;
+
+       wiiproto_keep_rumble(wdata, &cmd[1]);
+       wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+/* requires the cmd-mutex to be held */
+int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+                                               const __u8 *wmem, __u8 size)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
+       wiiproto_req_wreg(wdata, offset, wmem, size);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (!ret && wdata->state.cmd_err)
+               ret = -EIO;
+
+       return ret;
+}
+
+/* requires the cmd-mutex to be held */
+ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem,
+                                                               __u8 size)
+{
+       unsigned long flags;
+       ssize_t ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.cmd_read_size = size;
+       wdata->state.cmd_read_buf = rmem;
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff);
+       wiiproto_req_rreg(wdata, offset, size);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.cmd_read_buf = NULL;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       if (!ret) {
+               if (wdata->state.cmd_read_size == 0)
+                       ret = -EIO;
+               else
+                       ret = wdata->state.cmd_read_size;
+       }
+
+       return ret;
+}
+
+static int wiimote_battery_get_property(struct power_supply *psy,
+                                               enum power_supply_property psp,
+                                               union power_supply_propval *val)
+{
+       struct wiimote_data *wdata = container_of(psy,
+                                               struct wiimote_data, battery);
+       int ret = 0, state;
+       unsigned long flags;
+
+       ret = wiimote_cmd_acquire(wdata);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
+       wiiproto_req_status(wdata);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       state = wdata->state.cmd_battery;
+       wiimote_cmd_release(wdata);
+
+       if (ret)
+               return ret;
+
+       switch (psp) {
+               case POWER_SUPPLY_PROP_CAPACITY:
+                       val->intval = state * 100 / 255;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+       }
+
+       return ret;
+}
+
+static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
+{
+       int ret;
+       unsigned long flags;
+       __u8 format = 0;
+       static const __u8 data_enable[] = { 0x01 };
+       static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
+                                               0x00, 0xaa, 0x00, 0x64 };
+       static const __u8 data_sens2[] = { 0x63, 0x03 };
+       static const __u8 data_fin[] = { 0x08 };
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+
+       if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
+               spin_unlock_irqrestore(&wdata->state.lock, flags);
+               return 0;
+       }
+
+       if (mode == 0) {
+               wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+               wiiproto_req_ir1(wdata, 0);
+               wiiproto_req_ir2(wdata, 0);
+               wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+               spin_unlock_irqrestore(&wdata->state.lock, flags);
+               return 0;
+       }
+
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_acquire(wdata);
+       if (ret)
+               return ret;
+
+       /* send PIXEL CLOCK ENABLE cmd first */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
+       wiiproto_req_ir1(wdata, 0x06);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (ret)
+               goto unlock;
+       if (wdata->state.cmd_err) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       /* enable IR LOGIC */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
+       wiiproto_req_ir2(wdata, 0x06);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (ret)
+               goto unlock;
+       if (wdata->state.cmd_err) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       /* enable IR cam but do not make it send data, yet */
+       ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
+                                                       sizeof(data_enable));
+       if (ret)
+               goto unlock;
+
+       /* write first sensitivity block */
+       ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
+                                                       sizeof(data_sens1));
+       if (ret)
+               goto unlock;
+
+       /* write second sensitivity block */
+       ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
+                                                       sizeof(data_sens2));
+       if (ret)
+               goto unlock;
+
+       /* put IR cam into desired state */
+       switch (mode) {
+               case WIIPROTO_FLAG_IR_FULL:
+                       format = 5;
+                       break;
+               case WIIPROTO_FLAG_IR_EXT:
+                       format = 3;
+                       break;
+               case WIIPROTO_FLAG_IR_BASIC:
+                       format = 1;
+                       break;
+       }
+       ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
+       if (ret)
+               goto unlock;
+
+       /* make IR cam send data */
+       ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
+       if (ret)
+               goto unlock;
+
+       /* request new DRM mode compatible to IR mode */
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+       wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
+       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+unlock:
+       wiimote_cmd_release(wdata);
+       return ret;
+}
+
+static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
+{
+       struct wiimote_data *wdata;
+       struct device *dev = led_dev->dev->parent;
+       int i;
+       unsigned long flags;
+       bool value = false;
+
+       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
+
+       for (i = 0; i < 4; ++i) {
+               if (wdata->leds[i] == led_dev) {
+                       spin_lock_irqsave(&wdata->state.lock, flags);
+                       value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
+                       spin_unlock_irqrestore(&wdata->state.lock, flags);
+                       break;
+               }
+       }
+
+       return value ? LED_FULL : LED_OFF;
+}
+
+static void wiimote_leds_set(struct led_classdev *led_dev,
+                                               enum led_brightness value)
+{
+       struct wiimote_data *wdata;
+       struct device *dev = led_dev->dev->parent;
+       int i;
+       unsigned long flags;
+       __u8 state, flag;
+
+       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
+
+       for (i = 0; i < 4; ++i) {
+               if (wdata->leds[i] == led_dev) {
+                       flag = WIIPROTO_FLAG_LED(i + 1);
+                       spin_lock_irqsave(&wdata->state.lock, flags);
+                       state = wdata->state.flags;
+                       if (value == LED_OFF)
+                               wiiproto_req_leds(wdata, state & ~flag);
+                       else
+                               wiiproto_req_leds(wdata, state | flag);
+                       spin_unlock_irqrestore(&wdata->state.lock, flags);
+                       break;
+               }
+       }
+}
+
+static int wiimote_ff_play(struct input_dev *dev, void *data,
+                                                       struct ff_effect *eff)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       __u8 value;
+       unsigned long flags;
+
+       /*
+        * The wiimote supports only a single rumble motor so if any magnitude
+        * is set to non-zero then we start the rumble motor. If both are set to
+        * zero, we stop the rumble motor.
+        */
+
+       if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
+               value = 1;
+       else
+               value = 0;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_rumble(wdata, value);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+}
+
+static int wiimote_input_open(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+
+       return hid_hw_open(wdata->hdev);
+}
+
+static void wiimote_input_close(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+
+       hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_accel_open(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       int ret;
+       unsigned long flags;
+
+       ret = hid_hw_open(wdata->hdev);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_accel(wdata, true);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+}
+
+static void wiimote_accel_close(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wiiproto_req_accel(wdata, false);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_ir_open(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+       int ret;
+
+       ret = hid_hw_open(wdata->hdev);
+       if (ret)
+               return ret;
+
+       ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
+       if (ret) {
+               hid_hw_close(wdata->hdev);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void wiimote_ir_close(struct input_dev *dev)
+{
+       struct wiimote_data *wdata = input_get_drvdata(dev);
+
+       wiimote_init_ir(wdata, 0);
+       hid_hw_close(wdata->hdev);
+}
+
+static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
+{
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
+                                                       !!(payload[0] & 0x01));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_RIGHT],
+                                                       !!(payload[0] & 0x02));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_DOWN],
+                                                       !!(payload[0] & 0x04));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_UP],
+                                                       !!(payload[0] & 0x08));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_PLUS],
+                                                       !!(payload[0] & 0x10));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_TWO],
+                                                       !!(payload[1] & 0x01));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_ONE],
+                                                       !!(payload[1] & 0x02));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_B],
+                                                       !!(payload[1] & 0x04));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_A],
+                                                       !!(payload[1] & 0x08));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_MINUS],
+                                                       !!(payload[1] & 0x10));
+       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_HOME],
+                                                       !!(payload[1] & 0x80));
+       input_sync(wdata->input);
+}
+
+static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u16 x, y, z;
+
+       if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+               return;
+
+       /*
+        * payload is: BB BB XX YY ZZ
+        * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ
+        * contain the upper 8 bits of each value. The lower 2 bits are
+        * contained in the buttons data BB BB.
+        * Bits 6 and 7 of the first buttons byte BB is the lower 2 bits of the
+        * X accel value. Bit 5 of the second buttons byte is the 2nd bit of Y
+        * accel value and bit 6 is the second bit of the Z value.
+        * The first bit of Y and Z values is not available and always set to 0.
+        * 0x200 is returned on no movement.
+        */
+
+       x = payload[2] << 2;
+       y = payload[3] << 2;
+       z = payload[4] << 2;
+
+       x |= (payload[0] >> 5) & 0x3;
+       y |= (payload[1] >> 4) & 0x2;
+       z |= (payload[1] >> 5) & 0x2;
+
+       input_report_abs(wdata->accel, ABS_RX, x - 0x200);
+       input_report_abs(wdata->accel, ABS_RY, y - 0x200);
+       input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
+       input_sync(wdata->accel);
+}
+
+#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT0X, ABS_HAT0Y)
+#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT1X, ABS_HAT1Y)
+#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT2X, ABS_HAT2Y)
+#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+                                                       ABS_HAT3X, ABS_HAT3Y)
+
+static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+                                               bool packed, __u8 xid, __u8 yid)
+{
+       __u16 x, y;
+
+       if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
+               return;
+
+       /*
+        * Basic IR data is encoded into 3 bytes. The first two bytes are the
+        * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits
+        * of both.
+        * If data is packed, then the 3rd byte is put first and slightly
+        * reordered. This allows interleaving packed and non-packed data to
+        * have two IR sets in 5 bytes instead of 6.
+        * The resulting 10bit X/Y values are passed to the ABS_HATXY input dev.
+        */
+
+       if (packed) {
+               x = ir[1] << 2;
+               y = ir[2] << 2;
+
+               x |= ir[0] & 0x3;
+               y |= (ir[0] >> 2) & 0x3;
+       } else {
+               x = ir[0] << 2;
+               y = ir[1] << 2;
+
+               x |= (ir[2] >> 4) & 0x3;
+               y |= (ir[2] >> 6) & 0x3;
+       }
+
+       input_report_abs(wdata->ir, xid, x);
+       input_report_abs(wdata->ir, yid, y);
+}
+
+static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+
+       /* on status reports the drm is reset so we need to resend the drm */
+       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+
+       wiiext_event(wdata, payload[2] & 0x02);
+
+       if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
+               wdata->state.cmd_battery = payload[5];
+               wiimote_cmd_complete(wdata);
+       }
+}
+
+static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u16 offset = payload[3] << 8 | payload[4];
+       __u8 size = (payload[2] >> 4) + 1;
+       __u8 err = payload[2] & 0x0f;
+
+       handler_keys(wdata, payload);
+
+       if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
+               if (err)
+                       size = 0;
+               else if (size > wdata->state.cmd_read_size)
+                       size = wdata->state.cmd_read_size;
+
+               wdata->state.cmd_read_size = size;
+               if (wdata->state.cmd_read_buf)
+                       memcpy(wdata->state.cmd_read_buf, &payload[5], size);
+               wiimote_cmd_complete(wdata);
+       }
+}
+
+static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u8 err = payload[3];
+       __u8 cmd = payload[2];
+
+       handler_keys(wdata, payload);
+
+       if (wiimote_cmd_pending(wdata, cmd, 0)) {
+               wdata->state.cmd_err = err;
+               wiimote_cmd_complete(wdata);
+       } else if (err) {
+               hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+                                                                       cmd);
+       }
+}
+
+static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+}
+
+static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       wiiext_handle(wdata, &payload[2]);
+}
+
+static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+       ir_to_input0(wdata, &payload[5], false);
+       ir_to_input1(wdata, &payload[8], false);
+       ir_to_input2(wdata, &payload[11], false);
+       ir_to_input3(wdata, &payload[14], false);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       wiiext_handle(wdata, &payload[2]);
+}
+
+static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       ir_to_input0(wdata, &payload[2], false);
+       ir_to_input1(wdata, &payload[4], true);
+       ir_to_input2(wdata, &payload[7], false);
+       ir_to_input3(wdata, &payload[9], true);
+       input_sync(wdata->ir);
+       wiiext_handle(wdata, &payload[12]);
+}
+
+static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+       wiiext_handle(wdata, &payload[5]);
+}
+
+static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+       handler_accel(wdata, payload);
+       ir_to_input0(wdata, &payload[5], false);
+       ir_to_input1(wdata, &payload[7], true);
+       ir_to_input2(wdata, &payload[10], false);
+       ir_to_input3(wdata, &payload[12], true);
+       input_sync(wdata->ir);
+       wiiext_handle(wdata, &payload[15]);
+}
+
+static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
+{
+       wiiext_handle(wdata, payload);
+}
+
+static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
+{
+       handler_keys(wdata, payload);
+
+       wdata->state.accel_split[0] = payload[2];
+       wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
+       wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
+
+       ir_to_input0(wdata, &payload[3], false);
+       ir_to_input1(wdata, &payload[12], false);
+       input_sync(wdata->ir);
+}
+
+static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
+{
+       __u8 buf[5];
+
+       handler_keys(wdata, payload);
+
+       wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
+       wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
+
+       buf[0] = 0;
+       buf[1] = 0;
+       buf[2] = wdata->state.accel_split[0];
+       buf[3] = payload[2];
+       buf[4] = wdata->state.accel_split[1];
+       handler_accel(wdata, buf);
+
+       ir_to_input2(wdata, &payload[3], false);
+       ir_to_input3(wdata, &payload[12], false);
+       input_sync(wdata->ir);
+}
+
+struct wiiproto_handler {
+       __u8 id;
+       size_t size;
+       void (*func)(struct wiimote_data *wdata, const __u8 *payload);
+};
+
+static struct wiiproto_handler handlers[] = {
+       { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+       { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
+       { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
+       { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+       { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
+       { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
+       { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
+       { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
+       { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
+       { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
+       { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
+       { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
+       { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
+       { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
+       { .id = 0 }
+};
+
+static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
+                                                       u8 *raw_data, int size)
+{
+       struct wiimote_data *wdata = hid_get_drvdata(hdev);
+       struct wiiproto_handler *h;
+       int i;
+       unsigned long flags;
+       bool handled = false;
+
+       if (size < 1)
+               return -EINVAL;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+
+       for (i = 0; handlers[i].id; ++i) {
+               h = &handlers[i];
+               if (h->id == raw_data[0] && h->size < size) {
+                       h->func(wdata, &raw_data[1]);
+                       handled = true;
+               }
+       }
+
+       if (!handled)
+               hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
+                                                                       size);
+
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+}
+
+static void wiimote_leds_destroy(struct wiimote_data *wdata)
+{
+       int i;
+       struct led_classdev *led;
+
+       for (i = 0; i < 4; ++i) {
+               if (wdata->leds[i]) {
+                       led = wdata->leds[i];
+                       wdata->leds[i] = NULL;
+                       led_classdev_unregister(led);
+                       kfree(led);
+               }
+       }
+}
+
+static int wiimote_leds_create(struct wiimote_data *wdata)
+{
+       int i, ret;
+       struct device *dev = &wdata->hdev->dev;
+       size_t namesz = strlen(dev_name(dev)) + 9;
+       struct led_classdev *led;
+       char *name;
+
+       for (i = 0; i < 4; ++i) {
+               led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
+               if (!led) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               name = (void*)&led[1];
+               snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
+               led->name = name;
+               led->brightness = 0;
+               led->max_brightness = 1;
+               led->brightness_get = wiimote_leds_get;
+               led->brightness_set = wiimote_leds_set;
+
+               ret = led_classdev_register(dev, led);
+               if (ret) {
+                       kfree(led);
+                       goto err;
+               }
+               wdata->leds[i] = led;
+       }
+
+       return 0;
+
+err:
+       wiimote_leds_destroy(wdata);
+       return ret;
+}
+
+static struct wiimote_data *wiimote_create(struct hid_device *hdev)
+{
+       struct wiimote_data *wdata;
+       int i;
+
+       wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
+       if (!wdata)
+               return NULL;
+
+       wdata->input = input_allocate_device();
+       if (!wdata->input)
+               goto err;
+
+       wdata->hdev = hdev;
+       hid_set_drvdata(hdev, wdata);
+
+       input_set_drvdata(wdata->input, wdata);
+       wdata->input->open = wiimote_input_open;
+       wdata->input->close = wiimote_input_close;
+       wdata->input->dev.parent = &wdata->hdev->dev;
+       wdata->input->id.bustype = wdata->hdev->bus;
+       wdata->input->id.vendor = wdata->hdev->vendor;
+       wdata->input->id.product = wdata->hdev->product;
+       wdata->input->id.version = wdata->hdev->version;
+       wdata->input->name = WIIMOTE_NAME;
+
+       set_bit(EV_KEY, wdata->input->evbit);
+       for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
+               set_bit(wiiproto_keymap[i], wdata->input->keybit);
+
+       set_bit(FF_RUMBLE, wdata->input->ffbit);
+       if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
+               goto err_input;
+
+       wdata->accel = input_allocate_device();
+       if (!wdata->accel)
+               goto err_input;
+
+       input_set_drvdata(wdata->accel, wdata);
+       wdata->accel->open = wiimote_accel_open;
+       wdata->accel->close = wiimote_accel_close;
+       wdata->accel->dev.parent = &wdata->hdev->dev;
+       wdata->accel->id.bustype = wdata->hdev->bus;
+       wdata->accel->id.vendor = wdata->hdev->vendor;
+       wdata->accel->id.product = wdata->hdev->product;
+       wdata->accel->id.version = wdata->hdev->version;
+       wdata->accel->name = WIIMOTE_NAME " Accelerometer";
+
+       set_bit(EV_ABS, wdata->accel->evbit);
+       set_bit(ABS_RX, wdata->accel->absbit);
+       set_bit(ABS_RY, wdata->accel->absbit);
+       set_bit(ABS_RZ, wdata->accel->absbit);
+       input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
+       input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
+       input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
+
+       wdata->ir = input_allocate_device();
+       if (!wdata->ir)
+               goto err_ir;
+
+       input_set_drvdata(wdata->ir, wdata);
+       wdata->ir->open = wiimote_ir_open;
+       wdata->ir->close = wiimote_ir_close;
+       wdata->ir->dev.parent = &wdata->hdev->dev;
+       wdata->ir->id.bustype = wdata->hdev->bus;
+       wdata->ir->id.vendor = wdata->hdev->vendor;
+       wdata->ir->id.product = wdata->hdev->product;
+       wdata->ir->id.version = wdata->hdev->version;
+       wdata->ir->name = WIIMOTE_NAME " IR";
+
+       set_bit(EV_ABS, wdata->ir->evbit);
+       set_bit(ABS_HAT0X, wdata->ir->absbit);
+       set_bit(ABS_HAT0Y, wdata->ir->absbit);
+       set_bit(ABS_HAT1X, wdata->ir->absbit);
+       set_bit(ABS_HAT1Y, wdata->ir->absbit);
+       set_bit(ABS_HAT2X, wdata->ir->absbit);
+       set_bit(ABS_HAT2Y, wdata->ir->absbit);
+       set_bit(ABS_HAT3X, wdata->ir->absbit);
+       set_bit(ABS_HAT3Y, wdata->ir->absbit);
+       input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
+       input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
+
+       spin_lock_init(&wdata->qlock);
+       INIT_WORK(&wdata->worker, wiimote_worker);
+
+       spin_lock_init(&wdata->state.lock);
+       init_completion(&wdata->state.ready);
+       mutex_init(&wdata->state.sync);
+       wdata->state.drm = WIIPROTO_REQ_DRM_K;
+
+       return wdata;
+
+err_ir:
+       input_free_device(wdata->accel);
+err_input:
+       input_free_device(wdata->input);
+err:
+       kfree(wdata);
+       return NULL;
+}
+
+static void wiimote_destroy(struct wiimote_data *wdata)
+{
+       wiidebug_deinit(wdata);
+       wiiext_deinit(wdata);
+       wiimote_leds_destroy(wdata);
+
+       power_supply_unregister(&wdata->battery);
+       input_unregister_device(wdata->accel);
+       input_unregister_device(wdata->ir);
+       input_unregister_device(wdata->input);
+       cancel_work_sync(&wdata->worker);
+       hid_hw_stop(wdata->hdev);
+
+       kfree(wdata);
+}
+
+static int wiimote_hid_probe(struct hid_device *hdev,
+                               const struct hid_device_id *id)
+{
+       struct wiimote_data *wdata;
+       int ret;
+
+       hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+       wdata = wiimote_create(hdev);
+       if (!wdata) {
+               hid_err(hdev, "Can't alloc device\n");
+               return -ENOMEM;
+       }
+
+       ret = hid_parse(hdev);
+       if (ret) {
+               hid_err(hdev, "HID parse failed\n");
+               goto err;
+       }
+
+       ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+       if (ret) {
+               hid_err(hdev, "HW start failed\n");
+               goto err;
+       }
+
+       ret = input_register_device(wdata->accel);
+       if (ret) {
+               hid_err(hdev, "Cannot register input device\n");
+               goto err_stop;
+       }
+
+       ret = input_register_device(wdata->ir);
+       if (ret) {
+               hid_err(hdev, "Cannot register input device\n");
+               goto err_ir;
+       }
+
+       ret = input_register_device(wdata->input);
+       if (ret) {
+               hid_err(hdev, "Cannot register input device\n");
+               goto err_input;
+       }
+
+       wdata->battery.properties = wiimote_battery_props;
+       wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
+       wdata->battery.get_property = wiimote_battery_get_property;
+       wdata->battery.name = "wiimote_battery";
+       wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+       wdata->battery.use_for_apm = 0;
+
+       ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
+       if (ret) {
+               hid_err(hdev, "Cannot register battery device\n");
+               goto err_battery;
+       }
+
+       ret = wiimote_leds_create(wdata);
+       if (ret)
+               goto err_free;
+
+       ret = wiiext_init(wdata);
+       if (ret)
+               goto err_free;
+
+       ret = wiidebug_init(wdata);
+       if (ret)
+               goto err_free;
+
+       hid_info(hdev, "New device registered\n");
+
+       /* by default set led1 after device initialization */
+       spin_lock_irq(&wdata->state.lock);
+       wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1);
+       spin_unlock_irq(&wdata->state.lock);
+
+       return 0;
+
+err_free:
+       wiimote_destroy(wdata);
+       return ret;
+
+err_battery:
+       input_unregister_device(wdata->input);
+       wdata->input = NULL;
+err_input:
+       input_unregister_device(wdata->ir);
+       wdata->ir = NULL;
+err_ir:
+       input_unregister_device(wdata->accel);
+       wdata->accel = NULL;
+err_stop:
+       hid_hw_stop(hdev);
+err:
+       input_free_device(wdata->ir);
+       input_free_device(wdata->accel);
+       input_free_device(wdata->input);
+       kfree(wdata);
+       return ret;
+}
+
+static void wiimote_hid_remove(struct hid_device *hdev)
+{
+       struct wiimote_data *wdata = hid_get_drvdata(hdev);
+
+       hid_info(hdev, "Device removed\n");
+       wiimote_destroy(wdata);
+}
+
+static const struct hid_device_id wiimote_hid_devices[] = {
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+                               USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { }
+};
+MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
+
+static struct hid_driver wiimote_hid_driver = {
+       .name = "wiimote",
+       .id_table = wiimote_hid_devices,
+       .probe = wiimote_hid_probe,
+       .remove = wiimote_hid_remove,
+       .raw_event = wiimote_hid_event,
+};
+
+static int __init wiimote_init(void)
+{
+       int ret;
+
+       ret = hid_register_driver(&wiimote_hid_driver);
+       if (ret)
+               pr_err("Can't register wiimote hid driver\n");
+
+       return ret;
+}
+
+static void __exit wiimote_exit(void)
+{
+       hid_unregister_driver(&wiimote_hid_driver);
+}
+
+module_init(wiimote_init);
+module_exit(wiimote_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
new file mode 100644 (file)
index 0000000..17dabc1
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Debug support for HID Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "hid-wiimote.h"
+
+struct wiimote_debug {
+       struct wiimote_data *wdata;
+       struct dentry *eeprom;
+       struct dentry *drm;
+};
+
+static int wiidebug_eeprom_open(struct inode *i, struct file *f)
+{
+       f->private_data = i->i_private;
+       return 0;
+}
+
+static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+                                                               loff_t *off)
+{
+       struct wiimote_debug *dbg = f->private_data;
+       struct wiimote_data *wdata = dbg->wdata;
+       unsigned long flags;
+       ssize_t ret;
+       char buf[16];
+       __u16 size;
+
+       if (s == 0)
+               return -EINVAL;
+       if (*off > 0xffffff)
+               return 0;
+       if (s > 16)
+               s = 16;
+
+       ret = wiimote_cmd_acquire(wdata);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.cmd_read_size = s;
+       wdata->state.cmd_read_buf = buf;
+       wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, *off & 0xffff);
+       wiiproto_req_reeprom(wdata, *off, s);
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       ret = wiimote_cmd_wait(wdata);
+       if (!ret)
+               size = wdata->state.cmd_read_size;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->state.cmd_read_buf = NULL;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       wiimote_cmd_release(wdata);
+
+       if (ret)
+               return ret;
+       else if (size == 0)
+               return -EIO;
+
+       if (copy_to_user(u, buf, size))
+               return -EFAULT;
+
+       *off += size;
+       ret = size;
+
+       return ret;
+}
+
+static const struct file_operations wiidebug_eeprom_fops = {
+       .owner = THIS_MODULE,
+       .open = wiidebug_eeprom_open,
+       .read = wiidebug_eeprom_read,
+       .llseek = generic_file_llseek,
+};
+
+static const char *wiidebug_drmmap[] = {
+       [WIIPROTO_REQ_NULL] = "NULL",
+       [WIIPROTO_REQ_DRM_K] = "K",
+       [WIIPROTO_REQ_DRM_KA] = "KA",
+       [WIIPROTO_REQ_DRM_KE] = "KE",
+       [WIIPROTO_REQ_DRM_KAI] = "KAI",
+       [WIIPROTO_REQ_DRM_KEE] = "KEE",
+       [WIIPROTO_REQ_DRM_KAE] = "KAE",
+       [WIIPROTO_REQ_DRM_KIE] = "KIE",
+       [WIIPROTO_REQ_DRM_KAIE] = "KAIE",
+       [WIIPROTO_REQ_DRM_E] = "E",
+       [WIIPROTO_REQ_DRM_SKAI1] = "SKAI1",
+       [WIIPROTO_REQ_DRM_SKAI2] = "SKAI2",
+       [WIIPROTO_REQ_MAX] = NULL
+};
+
+static int wiidebug_drm_show(struct seq_file *f, void *p)
+{
+       struct wiimote_debug *dbg = f->private;
+       const char *str = NULL;
+       unsigned long flags;
+       __u8 drm;
+
+       spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+       drm = dbg->wdata->state.drm;
+       spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+       if (drm < WIIPROTO_REQ_MAX)
+               str = wiidebug_drmmap[drm];
+       if (!str)
+               str = "unknown";
+
+       seq_printf(f, "%s\n", str);
+
+       return 0;
+}
+
+static int wiidebug_drm_open(struct inode *i, struct file *f)
+{
+       return single_open(f, wiidebug_drm_show, i->i_private);
+}
+
+static ssize_t wiidebug_drm_write(struct file *f, const char __user *u,
+                                                       size_t s, loff_t *off)
+{
+       struct wiimote_debug *dbg = f->private_data;
+       unsigned long flags;
+       char buf[16];
+       ssize_t len;
+       int i;
+
+       if (s == 0)
+               return -EINVAL;
+
+       len = min((size_t) 15, s);
+       if (copy_from_user(buf, u, len))
+               return -EFAULT;
+
+       buf[15] = 0;
+
+       for (i = 0; i < WIIPROTO_REQ_MAX; ++i) {
+               if (!wiidebug_drmmap[i])
+                       continue;
+               if (!strcasecmp(buf, wiidebug_drmmap[i]))
+                       break;
+       }
+
+       if (i == WIIPROTO_REQ_MAX)
+               i = simple_strtoul(buf, NULL, 10);
+
+       spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+       wiiproto_req_drm(dbg->wdata, (__u8) i);
+       spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+       return len;
+}
+
+static const struct file_operations wiidebug_drm_fops = {
+       .owner = THIS_MODULE,
+       .open = wiidebug_drm_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .write = wiidebug_drm_write,
+       .release = single_release,
+};
+
+int wiidebug_init(struct wiimote_data *wdata)
+{
+       struct wiimote_debug *dbg;
+       unsigned long flags;
+       int ret = -ENOMEM;
+
+       dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+       if (!dbg)
+               return -ENOMEM;
+
+       dbg->wdata = wdata;
+
+       dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
+               dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
+       if (!dbg->eeprom)
+               goto err;
+
+       dbg->drm = debugfs_create_file("drm", S_IRUSR,
+                       dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
+       if (!dbg->drm)
+               goto err_drm;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->debug = dbg;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+
+err_drm:
+       debugfs_remove(dbg->eeprom);
+err:
+       kfree(dbg);
+       return ret;
+}
+
+void wiidebug_deinit(struct wiimote_data *wdata)
+{
+       struct wiimote_debug *dbg = wdata->debug;
+       unsigned long flags;
+
+       if (!dbg)
+               return;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->debug = NULL;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       debugfs_remove(dbg->drm);
+       debugfs_remove(dbg->eeprom);
+       kfree(dbg);
+}
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
new file mode 100644 (file)
index 0000000..aa95870
--- /dev/null
@@ -0,0 +1,752 @@
+/*
+ * HID driver for Nintendo Wiimote extension devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "hid-wiimote.h"
+
+struct wiimote_ext {
+       struct wiimote_data *wdata;
+       struct work_struct worker;
+       struct input_dev *input;
+       struct input_dev *mp_input;
+
+       atomic_t opened;
+       atomic_t mp_opened;
+       bool plugged;
+       bool mp_plugged;
+       bool motionp;
+       __u8 ext_type;
+};
+
+enum wiiext_type {
+       WIIEXT_NONE,            /* placeholder */
+       WIIEXT_CLASSIC,         /* Nintendo classic controller */
+       WIIEXT_NUNCHUCK,        /* Nintendo nunchuck controller */
+};
+
+enum wiiext_keys {
+       WIIEXT_KEY_C,
+       WIIEXT_KEY_Z,
+       WIIEXT_KEY_A,
+       WIIEXT_KEY_B,
+       WIIEXT_KEY_X,
+       WIIEXT_KEY_Y,
+       WIIEXT_KEY_ZL,
+       WIIEXT_KEY_ZR,
+       WIIEXT_KEY_PLUS,
+       WIIEXT_KEY_MINUS,
+       WIIEXT_KEY_HOME,
+       WIIEXT_KEY_LEFT,
+       WIIEXT_KEY_RIGHT,
+       WIIEXT_KEY_UP,
+       WIIEXT_KEY_DOWN,
+       WIIEXT_KEY_LT,
+       WIIEXT_KEY_RT,
+       WIIEXT_KEY_COUNT
+};
+
+static __u16 wiiext_keymap[] = {
+       BTN_C,          /* WIIEXT_KEY_C */
+       BTN_Z,          /* WIIEXT_KEY_Z */
+       BTN_A,          /* WIIEXT_KEY_A */
+       BTN_B,          /* WIIEXT_KEY_B */
+       BTN_X,          /* WIIEXT_KEY_X */
+       BTN_Y,          /* WIIEXT_KEY_Y */
+       BTN_TL2,        /* WIIEXT_KEY_ZL */
+       BTN_TR2,        /* WIIEXT_KEY_ZR */
+       KEY_NEXT,       /* WIIEXT_KEY_PLUS */
+       KEY_PREVIOUS,   /* WIIEXT_KEY_MINUS */
+       BTN_MODE,       /* WIIEXT_KEY_HOME */
+       KEY_LEFT,       /* WIIEXT_KEY_LEFT */
+       KEY_RIGHT,      /* WIIEXT_KEY_RIGHT */
+       KEY_UP,         /* WIIEXT_KEY_UP */
+       KEY_DOWN,       /* WIIEXT_KEY_DOWN */
+       BTN_TL,         /* WIIEXT_KEY_LT */
+       BTN_TR,         /* WIIEXT_KEY_RT */
+};
+
+/* disable all extensions */
+static void ext_disable(struct wiimote_ext *ext)
+{
+       unsigned long flags;
+       __u8 wmem = 0x55;
+
+       if (!wiimote_cmd_acquire(ext->wdata)) {
+               wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+               wiimote_cmd_release(ext->wdata);
+       }
+
+       spin_lock_irqsave(&ext->wdata->state.lock, flags);
+       ext->motionp = false;
+       ext->ext_type = WIIEXT_NONE;
+       wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+       spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static bool motionp_read(struct wiimote_ext *ext)
+{
+       __u8 rmem[2], wmem;
+       ssize_t ret;
+       bool avail = false;
+
+       if (!atomic_read(&ext->mp_opened))
+               return false;
+
+       if (wiimote_cmd_acquire(ext->wdata))
+               return false;
+
+       /* initialize motion plus */
+       wmem = 0x55;
+       ret = wiimote_cmd_write(ext->wdata, 0xa600f0, &wmem, sizeof(wmem));
+       if (ret)
+               goto error;
+
+       /* read motion plus ID */
+       ret = wiimote_cmd_read(ext->wdata, 0xa600fe, rmem, 2);
+       if (ret == 2 || rmem[1] == 0x5)
+               avail = true;
+
+error:
+       wiimote_cmd_release(ext->wdata);
+       return avail;
+}
+
+static __u8 ext_read(struct wiimote_ext *ext)
+{
+       ssize_t ret;
+       __u8 rmem[2], wmem;
+       __u8 type = WIIEXT_NONE;
+
+       if (!ext->plugged || !atomic_read(&ext->opened))
+               return WIIEXT_NONE;
+
+       if (wiimote_cmd_acquire(ext->wdata))
+               return WIIEXT_NONE;
+
+       /* initialize extension */
+       wmem = 0x55;
+       ret = wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+       if (!ret) {
+               /* disable encryption */
+               wmem = 0x0;
+               wiimote_cmd_write(ext->wdata, 0xa400fb, &wmem, sizeof(wmem));
+       }
+
+       /* read extension ID */
+       ret = wiimote_cmd_read(ext->wdata, 0xa400fe, rmem, 2);
+       if (ret == 2) {
+               if (rmem[0] == 0 && rmem[1] == 0)
+                       type = WIIEXT_NUNCHUCK;
+               else if (rmem[0] == 0x01 && rmem[1] == 0x01)
+                       type = WIIEXT_CLASSIC;
+       }
+
+       wiimote_cmd_release(ext->wdata);
+
+       return type;
+}
+
+static void ext_enable(struct wiimote_ext *ext, bool motionp, __u8 ext_type)
+{
+       unsigned long flags;
+       __u8 wmem;
+       int ret;
+
+       if (motionp) {
+               if (wiimote_cmd_acquire(ext->wdata))
+                       return;
+
+               if (ext_type == WIIEXT_CLASSIC)
+                       wmem = 0x07;
+               else if (ext_type == WIIEXT_NUNCHUCK)
+                       wmem = 0x05;
+               else
+                       wmem = 0x04;
+
+               ret = wiimote_cmd_write(ext->wdata, 0xa600fe, &wmem, sizeof(wmem));
+               wiimote_cmd_release(ext->wdata);
+               if (ret)
+                       return;
+       }
+
+       spin_lock_irqsave(&ext->wdata->state.lock, flags);
+       ext->motionp = motionp;
+       ext->ext_type = ext_type;
+       wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+       spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static void wiiext_worker(struct work_struct *work)
+{
+       struct wiimote_ext *ext = container_of(work, struct wiimote_ext,
+                                                                       worker);
+       bool motionp;
+       __u8 ext_type;
+
+       ext_disable(ext);
+       motionp = motionp_read(ext);
+       ext_type = ext_read(ext);
+       ext_enable(ext, motionp, ext_type);
+}
+
+/* schedule work only once, otherwise mark for reschedule */
+static void wiiext_schedule(struct wiimote_ext *ext)
+{
+       queue_work(system_nrt_wq, &ext->worker);
+}
+
+/*
+ * Reacts on extension port events
+ * Whenever the driver gets an event from the wiimote that an extension has been
+ * plugged or unplugged, this function shall be called. It checks what extensions
+ * are connected and initializes and activates them.
+ * This can be called in atomic context. The initialization is done in a
+ * separate worker thread. The state.lock spinlock must be held by the caller.
+ */
+void wiiext_event(struct wiimote_data *wdata, bool plugged)
+{
+       if (!wdata->ext)
+               return;
+
+       if (wdata->ext->plugged == plugged)
+               return;
+
+       wdata->ext->plugged = plugged;
+
+       if (!plugged)
+               wdata->ext->mp_plugged = false;
+
+       /*
+        * We need to call wiiext_schedule(wdata->ext) here, however, the
+        * extension initialization logic is not fully understood and so
+        * automatic initialization is not supported, yet.
+        */
+}
+
+/*
+ * Returns true if the current DRM mode should contain extension data and false
+ * if there is no interest in extension data.
+ * All supported extensions send 6 byte extension data so any DRM that contains
+ * extension bytes is fine.
+ * The caller must hold the state.lock spinlock.
+ */
+bool wiiext_active(struct wiimote_data *wdata)
+{
+       if (!wdata->ext)
+               return false;
+
+       return wdata->ext->motionp || wdata->ext->ext_type;
+}
+
+static void handler_motionp(struct wiimote_ext *ext, const __u8 *payload)
+{
+       __s32 x, y, z;
+       bool plugged;
+
+       /*        |   8    7    6    5    4    3 |  2  |  1  |
+        *   -----+------------------------------+-----+-----+
+        *    1   |               Yaw Speed <7:0>            |
+        *    2   |              Roll Speed <7:0>            |
+        *    3   |             Pitch Speed <7:0>            |
+        *   -----+------------------------------+-----+-----+
+        *    4   |       Yaw Speed <13:8>       | Yaw |Pitch|
+        *   -----+------------------------------+-----+-----+
+        *    5   |      Roll Speed <13:8>       |Roll | Ext |
+        *   -----+------------------------------+-----+-----+
+        *    6   |     Pitch Speed <13:8>       |  1  |  0  |
+        *   -----+------------------------------+-----+-----+
+        * The single bits Yaw, Roll, Pitch in the lower right corner specify
+        * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
+        * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a
+        * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
+        * and 9 for slow.
+        * If the wiimote is not rotating the sensor reports 2^13 = 8192.
+        * Ext specifies whether an extension is connected to the motionp.
+        */
+
+       x = payload[0];
+       y = payload[1];
+       z = payload[2];
+
+       x |= (((__u16)payload[3]) << 6) & 0xff00;
+       y |= (((__u16)payload[4]) << 6) & 0xff00;
+       z |= (((__u16)payload[5]) << 6) & 0xff00;
+
+       x -= 8192;
+       y -= 8192;
+       z -= 8192;
+
+       if (!(payload[3] & 0x02))
+               x *= 18;
+       else
+               x *= 9;
+       if (!(payload[4] & 0x02))
+               y *= 18;
+       else
+               y *= 9;
+       if (!(payload[3] & 0x01))
+               z *= 18;
+       else
+               z *= 9;
+
+       input_report_abs(ext->mp_input, ABS_RX, x);
+       input_report_abs(ext->mp_input, ABS_RY, y);
+       input_report_abs(ext->mp_input, ABS_RZ, z);
+       input_sync(ext->mp_input);
+
+       plugged = payload[5] & 0x01;
+       if (plugged != ext->mp_plugged)
+               ext->mp_plugged = plugged;
+}
+
+static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+{
+       __s16 x, y, z, bx, by;
+
+       /*   Byte |   8    7 |  6    5 |  4    3 |  2 |  1  |
+        *   -----+----------+---------+---------+----+-----+
+        *    1   |              Button X <7:0>             |
+        *    2   |              Button Y <7:0>             |
+        *   -----+----------+---------+---------+----+-----+
+        *    3   |               Speed X <9:2>             |
+        *    4   |               Speed Y <9:2>             |
+        *    5   |               Speed Z <9:2>             |
+        *   -----+----------+---------+---------+----+-----+
+        *    6   | Z <1:0>  | Y <1:0> | X <1:0> | BC | BZ  |
+        *   -----+----------+---------+---------+----+-----+
+        * Button X/Y is the analog stick. Speed X, Y and Z are the
+        * accelerometer data in the same format as the wiimote's accelerometer.
+        * The 6th byte contains the LSBs of the accelerometer data.
+        * BC and BZ are the C and Z buttons: 0 means pressed
+        *
+        * If reported interleaved with motionp, then the layout changes. The
+        * 5th and 6th byte changes to:
+        *   -----+-----------------------------------+-----+
+        *    5   |            Speed Z <9:3>          | EXT |
+        *   -----+--------+-----+-----+----+----+----+-----+
+        *    6   |Z <2:1> |Y <1>|X <1>| BC | BZ | 0  |  0  |
+        *   -----+--------+-----+-----+----+----+----+-----+
+        * All three accelerometer values lose their LSB. The other data is
+        * still available but slightly moved.
+        *
+        * Center data for button values is 128. Center value for accelerometer
+        * values is 512 / 0x200
+        */
+
+       bx = payload[0];
+       by = payload[1];
+       bx -= 128;
+       by -= 128;
+
+       x = payload[2] << 2;
+       y = payload[3] << 2;
+       z = payload[4] << 2;
+
+       if (ext->motionp) {
+               x |= (payload[5] >> 3) & 0x02;
+               y |= (payload[5] >> 4) & 0x02;
+               z &= ~0x4;
+               z |= (payload[5] >> 5) & 0x06;
+       } else {
+               x |= (payload[5] >> 2) & 0x03;
+               y |= (payload[5] >> 4) & 0x03;
+               z |= (payload[5] >> 6) & 0x03;
+       }
+
+       x -= 0x200;
+       y -= 0x200;
+       z -= 0x200;
+
+       input_report_abs(ext->input, ABS_HAT0X, bx);
+       input_report_abs(ext->input, ABS_HAT0Y, by);
+
+       input_report_abs(ext->input, ABS_RX, x);
+       input_report_abs(ext->input, ABS_RY, y);
+       input_report_abs(ext->input, ABS_RZ, z);
+
+       if (ext->motionp) {
+               input_report_key(ext->input,
+                       wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+               input_report_key(ext->input,
+                       wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+       } else {
+               input_report_key(ext->input,
+                       wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+               input_report_key(ext->input,
+                       wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+       }
+
+       input_sync(ext->input);
+}
+
+static void handler_classic(struct wiimote_ext *ext, const __u8 *payload)
+{
+       __s8 rx, ry, lx, ly, lt, rt;
+
+       /*   Byte |  8  |  7  |  6  |  5  |  4  |  3  |  2  |  1  |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    1   | RX <5:4>  |              LX <5:0>             |
+        *    2   | RX <3:2>  |              LY <5:0>             |
+        *   -----+-----+-----+-----+-----------------------------+
+        *    3   |RX<1>| LT <5:4>  |         RY <5:1>            |
+        *   -----+-----+-----------+-----------------------------+
+        *    4   |     LT <3:1>    |         RT <5:1>            |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    5   | BDR | BDD | BLT | B-  | BH  | B+  | BRT |  1  |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    6   | BZL | BB  | BY  | BA  | BX  | BZR | BDL | BDU |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        * All buttons are 0 if pressed
+        * RX and RY are right analog stick
+        * LX and LY are left analog stick
+        * LT is left trigger, RT is right trigger
+        * BLT is 0 if left trigger is fully pressed
+        * BRT is 0 if right trigger is fully pressed
+        * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
+        * BZL is left Z button and BZR is right Z button
+        * B-, BH, B+ are +, HOME and - buttons
+        * BB, BY, BA, BX are B, Y, A, X buttons
+        * LSB of RX, RY, LT, and RT are not transmitted and always 0.
+        *
+        * With motionp enabled it changes slightly to this:
+        *   Byte |  8  |  7  |  6  |  5  |  4  |  3  |  2  |  1  |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    1   | RX <4:3>  |          LX <5:1>           | BDU |
+        *    2   | RX <2:1>  |          LY <5:1>           | BDL |
+        *   -----+-----+-----+-----+-----------------------+-----+
+        *    3   |RX<0>| LT <4:3>  |         RY <4:0>            |
+        *   -----+-----+-----------+-----------------------------+
+        *    4   |     LT <2:0>    |         RT <4:0>            |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    5   | BDR | BDD | BLT | B-  | BH  | B+  | BRT | EXT |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        *    6   | BZL | BB  | BY  | BA  | BX  | BZR |  0  |  0  |
+        *   -----+-----+-----+-----+-----+-----+-----+-----+-----+
+        * Only the LSBs of LX and LY are lost. BDU and BDL are moved, the rest
+        * is the same as before.
+        */
+
+       if (ext->motionp) {
+               lx = payload[0] & 0x3e;
+               ly = payload[0] & 0x3e;
+       } else {
+               lx = payload[0] & 0x3f;
+               ly = payload[0] & 0x3f;
+       }
+
+       rx = (payload[0] >> 3) & 0x14;
+       rx |= (payload[1] >> 5) & 0x06;
+       rx |= (payload[2] >> 7) & 0x01;
+       ry = payload[2] & 0x1f;
+
+       rt = payload[3] & 0x1f;
+       lt = (payload[2] >> 2) & 0x18;
+       lt |= (payload[3] >> 5) & 0x07;
+
+       rx <<= 1;
+       ry <<= 1;
+       rt <<= 1;
+       lt <<= 1;
+
+       input_report_abs(ext->input, ABS_HAT1X, lx - 0x20);
+       input_report_abs(ext->input, ABS_HAT1Y, ly - 0x20);
+       input_report_abs(ext->input, ABS_HAT2X, rx - 0x20);
+       input_report_abs(ext->input, ABS_HAT2Y, ry - 0x20);
+       input_report_abs(ext->input, ABS_HAT3X, rt - 0x20);
+       input_report_abs(ext->input, ABS_HAT3Y, lt - 0x20);
+
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RIGHT],
+                                                       !!(payload[4] & 0x80));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_DOWN],
+                                                       !!(payload[4] & 0x40));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LT],
+                                                       !!(payload[4] & 0x20));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_MINUS],
+                                                       !!(payload[4] & 0x10));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_HOME],
+                                                       !!(payload[4] & 0x08));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_PLUS],
+                                                       !!(payload[4] & 0x04));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RT],
+                                                       !!(payload[4] & 0x02));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZL],
+                                                       !!(payload[5] & 0x80));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_B],
+                                                       !!(payload[5] & 0x40));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_Y],
+                                                       !!(payload[5] & 0x20));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_A],
+                                                       !!(payload[5] & 0x10));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_X],
+                                                       !!(payload[5] & 0x08));
+       input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZR],
+                                                       !!(payload[5] & 0x04));
+
+       if (ext->motionp) {
+               input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+                                                       !!(payload[0] & 0x01));
+               input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+                                                       !!(payload[1] & 0x01));
+       } else {
+               input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+                                                       !!(payload[5] & 0x01));
+               input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+                                                       !!(payload[5] & 0x02));
+       }
+
+       input_sync(ext->input);
+}
+
+/* call this with state.lock spinlock held */
+void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload)
+{
+       struct wiimote_ext *ext = wdata->ext;
+
+       if (!ext)
+               return;
+
+       if (ext->motionp && (payload[5] & 0x02)) {
+               handler_motionp(ext, payload);
+       } else if (ext->ext_type == WIIEXT_NUNCHUCK) {
+               handler_nunchuck(ext, payload);
+       } else if (ext->ext_type == WIIEXT_CLASSIC) {
+               handler_classic(ext, payload);
+       }
+}
+
+static ssize_t wiiext_show(struct device *dev, struct device_attribute *attr,
+                                                               char *buf)
+{
+       struct wiimote_data *wdata = dev_to_wii(dev);
+       __u8 type = WIIEXT_NONE;
+       bool motionp = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       if (wdata->ext) {
+               motionp = wdata->ext->motionp;
+               type = wdata->ext->ext_type;
+       }
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       if (type == WIIEXT_NUNCHUCK) {
+               if (motionp)
+                       return sprintf(buf, "motionp+nunchuck\n");
+               else
+                       return sprintf(buf, "nunchuck\n");
+       } else if (type == WIIEXT_CLASSIC) {
+               if (motionp)
+                       return sprintf(buf, "motionp+classic\n");
+               else
+                       return sprintf(buf, "classic\n");
+       } else {
+               if (motionp)
+                       return sprintf(buf, "motionp\n");
+               else
+                       return sprintf(buf, "none\n");
+       }
+}
+
+static DEVICE_ATTR(extension, S_IRUGO, wiiext_show, NULL);
+
+static int wiiext_input_open(struct input_dev *dev)
+{
+       struct wiimote_ext *ext = input_get_drvdata(dev);
+       int ret;
+
+       ret = hid_hw_open(ext->wdata->hdev);
+       if (ret)
+               return ret;
+
+       atomic_inc(&ext->opened);
+       wiiext_schedule(ext);
+
+       return 0;
+}
+
+static void wiiext_input_close(struct input_dev *dev)
+{
+       struct wiimote_ext *ext = input_get_drvdata(dev);
+
+       atomic_dec(&ext->opened);
+       wiiext_schedule(ext);
+       hid_hw_close(ext->wdata->hdev);
+}
+
+static int wiiext_mp_open(struct input_dev *dev)
+{
+       struct wiimote_ext *ext = input_get_drvdata(dev);
+       int ret;
+
+       ret = hid_hw_open(ext->wdata->hdev);
+       if (ret)
+               return ret;
+
+       atomic_inc(&ext->mp_opened);
+       wiiext_schedule(ext);
+
+       return 0;
+}
+
+static void wiiext_mp_close(struct input_dev *dev)
+{
+       struct wiimote_ext *ext = input_get_drvdata(dev);
+
+       atomic_dec(&ext->mp_opened);
+       wiiext_schedule(ext);
+       hid_hw_close(ext->wdata->hdev);
+}
+
+/* Initializes the extension driver of a wiimote */
+int wiiext_init(struct wiimote_data *wdata)
+{
+       struct wiimote_ext *ext;
+       unsigned long flags;
+       int ret, i;
+
+       ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+       if (!ext)
+               return -ENOMEM;
+
+       ext->wdata = wdata;
+       INIT_WORK(&ext->worker, wiiext_worker);
+
+       ext->input = input_allocate_device();
+       if (!ext->input) {
+               ret = -ENOMEM;
+               goto err_input;
+       }
+
+       input_set_drvdata(ext->input, ext);
+       ext->input->open = wiiext_input_open;
+       ext->input->close = wiiext_input_close;
+       ext->input->dev.parent = &wdata->hdev->dev;
+       ext->input->id.bustype = wdata->hdev->bus;
+       ext->input->id.vendor = wdata->hdev->vendor;
+       ext->input->id.product = wdata->hdev->product;
+       ext->input->id.version = wdata->hdev->version;
+       ext->input->name = WIIMOTE_NAME " Extension";
+
+       set_bit(EV_KEY, ext->input->evbit);
+       for (i = 0; i < WIIEXT_KEY_COUNT; ++i)
+               set_bit(wiiext_keymap[i], ext->input->keybit);
+
+       set_bit(EV_ABS, ext->input->evbit);
+       set_bit(ABS_HAT0X, ext->input->absbit);
+       set_bit(ABS_HAT0Y, ext->input->absbit);
+       set_bit(ABS_HAT1X, ext->input->absbit);
+       set_bit(ABS_HAT1Y, ext->input->absbit);
+       set_bit(ABS_HAT2X, ext->input->absbit);
+       set_bit(ABS_HAT2Y, ext->input->absbit);
+       set_bit(ABS_HAT3X, ext->input->absbit);
+       set_bit(ABS_HAT3Y, ext->input->absbit);
+       input_set_abs_params(ext->input, ABS_HAT0X, -120, 120, 2, 4);
+       input_set_abs_params(ext->input, ABS_HAT0Y, -120, 120, 2, 4);
+       input_set_abs_params(ext->input, ABS_HAT1X, -30, 30, 1, 1);
+       input_set_abs_params(ext->input, ABS_HAT1Y, -30, 30, 1, 1);
+       input_set_abs_params(ext->input, ABS_HAT2X, -30, 30, 1, 1);
+       input_set_abs_params(ext->input, ABS_HAT2Y, -30, 30, 1, 1);
+       input_set_abs_params(ext->input, ABS_HAT3X, -30, 30, 1, 1);
+       input_set_abs_params(ext->input, ABS_HAT3Y, -30, 30, 1, 1);
+       set_bit(ABS_RX, ext->input->absbit);
+       set_bit(ABS_RY, ext->input->absbit);
+       set_bit(ABS_RZ, ext->input->absbit);
+       input_set_abs_params(ext->input, ABS_RX, -500, 500, 2, 4);
+       input_set_abs_params(ext->input, ABS_RY, -500, 500, 2, 4);
+       input_set_abs_params(ext->input, ABS_RZ, -500, 500, 2, 4);
+
+       ret = input_register_device(ext->input);
+       if (ret) {
+               input_free_device(ext->input);
+               goto err_input;
+       }
+
+       ext->mp_input = input_allocate_device();
+       if (!ext->mp_input) {
+               ret = -ENOMEM;
+               goto err_mp;
+       }
+
+       input_set_drvdata(ext->mp_input, ext);
+       ext->mp_input->open = wiiext_mp_open;
+       ext->mp_input->close = wiiext_mp_close;
+       ext->mp_input->dev.parent = &wdata->hdev->dev;
+       ext->mp_input->id.bustype = wdata->hdev->bus;
+       ext->mp_input->id.vendor = wdata->hdev->vendor;
+       ext->mp_input->id.product = wdata->hdev->product;
+       ext->mp_input->id.version = wdata->hdev->version;
+       ext->mp_input->name = WIIMOTE_NAME " Motion+";
+
+       set_bit(EV_ABS, ext->mp_input->evbit);
+       set_bit(ABS_RX, ext->mp_input->absbit);
+       set_bit(ABS_RY, ext->mp_input->absbit);
+       set_bit(ABS_RZ, ext->mp_input->absbit);
+       input_set_abs_params(ext->mp_input, ABS_RX, -160000, 160000, 4, 8);
+       input_set_abs_params(ext->mp_input, ABS_RY, -160000, 160000, 4, 8);
+       input_set_abs_params(ext->mp_input, ABS_RZ, -160000, 160000, 4, 8);
+
+       ret = input_register_device(ext->mp_input);
+       if (ret) {
+               input_free_device(ext->mp_input);
+               goto err_mp;
+       }
+
+       ret = device_create_file(&wdata->hdev->dev, &dev_attr_extension);
+       if (ret)
+               goto err_dev;
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->ext = ext;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       return 0;
+
+err_dev:
+       input_unregister_device(ext->mp_input);
+err_mp:
+       input_unregister_device(ext->input);
+err_input:
+       kfree(ext);
+       return ret;
+}
+
+/* Deinitializes the extension driver of a wiimote */
+void wiiext_deinit(struct wiimote_data *wdata)
+{
+       struct wiimote_ext *ext = wdata->ext;
+       unsigned long flags;
+
+       if (!ext)
+               return;
+
+       /*
+        * We first unset wdata->ext to avoid further input from the wiimote
+        * core. The worker thread does not access this pointer so it is not
+        * affected by this.
+        * We kill the worker after this so it does not get respawned during
+        * deinitialization.
+        */
+
+       spin_lock_irqsave(&wdata->state.lock, flags);
+       wdata->ext = NULL;
+       spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+       device_remove_file(&wdata->hdev->dev, &dev_attr_extension);
+       input_unregister_device(ext->mp_input);
+       input_unregister_device(ext->input);
+
+       cancel_work_sync(&ext->worker);
+       kfree(ext);
+}
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
deleted file mode 100644 (file)
index 76739c0..0000000
+++ /dev/null
@@ -1,1346 +0,0 @@
-/*
- * HID driver for Nintendo Wiimote devices
- * Copyright (c) 2011 David Herrmann
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/input.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/power_supply.h>
-#include <linux/spinlock.h>
-#include "hid-ids.h"
-
-#define WIIMOTE_VERSION "0.2"
-#define WIIMOTE_NAME "Nintendo Wii Remote"
-#define WIIMOTE_BUFSIZE 32
-
-struct wiimote_buf {
-       __u8 data[HID_MAX_BUFFER_SIZE];
-       size_t size;
-};
-
-struct wiimote_state {
-       spinlock_t lock;
-       __u8 flags;
-       __u8 accel_split[2];
-
-       /* synchronous cmd requests */
-       struct mutex sync;
-       struct completion ready;
-       int cmd;
-       __u32 opt;
-
-       /* results of synchronous requests */
-       __u8 cmd_battery;
-       __u8 cmd_err;
-};
-
-struct wiimote_data {
-       struct hid_device *hdev;
-       struct input_dev *input;
-       struct led_classdev *leds[4];
-       struct input_dev *accel;
-       struct input_dev *ir;
-       struct power_supply battery;
-
-       spinlock_t qlock;
-       __u8 head;
-       __u8 tail;
-       struct wiimote_buf outq[WIIMOTE_BUFSIZE];
-       struct work_struct worker;
-
-       struct wiimote_state state;
-};
-
-#define WIIPROTO_FLAG_LED1             0x01
-#define WIIPROTO_FLAG_LED2             0x02
-#define WIIPROTO_FLAG_LED3             0x04
-#define WIIPROTO_FLAG_LED4             0x08
-#define WIIPROTO_FLAG_RUMBLE           0x10
-#define WIIPROTO_FLAG_ACCEL            0x20
-#define WIIPROTO_FLAG_IR_BASIC         0x40
-#define WIIPROTO_FLAG_IR_EXT           0x80
-#define WIIPROTO_FLAG_IR_FULL          0xc0 /* IR_BASIC | IR_EXT */
-#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
-                                       WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
-#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
-                                                       WIIPROTO_FLAG_IR_FULL)
-
-/* return flag for led \num */
-#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
-
-enum wiiproto_reqs {
-       WIIPROTO_REQ_NULL = 0x0,
-       WIIPROTO_REQ_RUMBLE = 0x10,
-       WIIPROTO_REQ_LED = 0x11,
-       WIIPROTO_REQ_DRM = 0x12,
-       WIIPROTO_REQ_IR1 = 0x13,
-       WIIPROTO_REQ_SREQ = 0x15,
-       WIIPROTO_REQ_WMEM = 0x16,
-       WIIPROTO_REQ_RMEM = 0x17,
-       WIIPROTO_REQ_IR2 = 0x1a,
-       WIIPROTO_REQ_STATUS = 0x20,
-       WIIPROTO_REQ_DATA = 0x21,
-       WIIPROTO_REQ_RETURN = 0x22,
-       WIIPROTO_REQ_DRM_K = 0x30,
-       WIIPROTO_REQ_DRM_KA = 0x31,
-       WIIPROTO_REQ_DRM_KE = 0x32,
-       WIIPROTO_REQ_DRM_KAI = 0x33,
-       WIIPROTO_REQ_DRM_KEE = 0x34,
-       WIIPROTO_REQ_DRM_KAE = 0x35,
-       WIIPROTO_REQ_DRM_KIE = 0x36,
-       WIIPROTO_REQ_DRM_KAIE = 0x37,
-       WIIPROTO_REQ_DRM_E = 0x3d,
-       WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
-       WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
-};
-
-enum wiiproto_keys {
-       WIIPROTO_KEY_LEFT,
-       WIIPROTO_KEY_RIGHT,
-       WIIPROTO_KEY_UP,
-       WIIPROTO_KEY_DOWN,
-       WIIPROTO_KEY_PLUS,
-       WIIPROTO_KEY_MINUS,
-       WIIPROTO_KEY_ONE,
-       WIIPROTO_KEY_TWO,
-       WIIPROTO_KEY_A,
-       WIIPROTO_KEY_B,
-       WIIPROTO_KEY_HOME,
-       WIIPROTO_KEY_COUNT
-};
-
-static __u16 wiiproto_keymap[] = {
-       KEY_LEFT,       /* WIIPROTO_KEY_LEFT */
-       KEY_RIGHT,      /* WIIPROTO_KEY_RIGHT */
-       KEY_UP,         /* WIIPROTO_KEY_UP */
-       KEY_DOWN,       /* WIIPROTO_KEY_DOWN */
-       KEY_NEXT,       /* WIIPROTO_KEY_PLUS */
-       KEY_PREVIOUS,   /* WIIPROTO_KEY_MINUS */
-       BTN_1,          /* WIIPROTO_KEY_ONE */
-       BTN_2,          /* WIIPROTO_KEY_TWO */
-       BTN_A,          /* WIIPROTO_KEY_A */
-       BTN_B,          /* WIIPROTO_KEY_B */
-       BTN_MODE,       /* WIIPROTO_KEY_HOME */
-};
-
-static enum power_supply_property wiimote_battery_props[] = {
-       POWER_SUPPLY_PROP_CAPACITY
-};
-
-/* requires the state.lock spinlock to be held */
-static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
-                                                               __u32 opt)
-{
-       return wdata->state.cmd == cmd && wdata->state.opt == opt;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
-{
-       wdata->state.cmd = WIIPROTO_REQ_NULL;
-       complete(&wdata->state.ready);
-}
-
-static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
-{
-       return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
-                                                               __u32 opt)
-{
-       INIT_COMPLETION(wdata->state.ready);
-       wdata->state.cmd = cmd;
-       wdata->state.opt = opt;
-}
-
-static inline void wiimote_cmd_release(struct wiimote_data *wdata)
-{
-       mutex_unlock(&wdata->state.sync);
-}
-
-static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
-{
-       int ret;
-
-       ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
-       if (ret < 0)
-               return -ERESTARTSYS;
-       else if (ret == 0)
-               return -EIO;
-       else
-               return 0;
-}
-
-static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
-                                                               size_t count)
-{
-       __u8 *buf;
-       ssize_t ret;
-
-       if (!hdev->hid_output_raw_report)
-               return -ENODEV;
-
-       buf = kmemdup(buffer, count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
-
-       kfree(buf);
-       return ret;
-}
-
-static void wiimote_worker(struct work_struct *work)
-{
-       struct wiimote_data *wdata = container_of(work, struct wiimote_data,
-                                                                       worker);
-       unsigned long flags;
-
-       spin_lock_irqsave(&wdata->qlock, flags);
-
-       while (wdata->head != wdata->tail) {
-               spin_unlock_irqrestore(&wdata->qlock, flags);
-               wiimote_hid_send(wdata->hdev, wdata->outq[wdata->tail].data,
-                                               wdata->outq[wdata->tail].size);
-               spin_lock_irqsave(&wdata->qlock, flags);
-
-               wdata->tail = (wdata->tail + 1) % WIIMOTE_BUFSIZE;
-       }
-
-       spin_unlock_irqrestore(&wdata->qlock, flags);
-}
-
-static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
-                                                               size_t count)
-{
-       unsigned long flags;
-       __u8 newhead;
-
-       if (count > HID_MAX_BUFFER_SIZE) {
-               hid_warn(wdata->hdev, "Sending too large output report\n");
-               return;
-       }
-
-       /*
-        * Copy new request into our output queue and check whether the
-        * queue is full. If it is full, discard this request.
-        * If it is empty we need to start a new worker that will
-        * send out the buffer to the hid device.
-        * If the queue is not empty, then there must be a worker
-        * that is currently sending out our buffer and this worker
-        * will reschedule itself until the queue is empty.
-        */
-
-       spin_lock_irqsave(&wdata->qlock, flags);
-
-       memcpy(wdata->outq[wdata->head].data, buffer, count);
-       wdata->outq[wdata->head].size = count;
-       newhead = (wdata->head + 1) % WIIMOTE_BUFSIZE;
-
-       if (wdata->head == wdata->tail) {
-               wdata->head = newhead;
-               schedule_work(&wdata->worker);
-       } else if (newhead != wdata->tail) {
-               wdata->head = newhead;
-       } else {
-               hid_warn(wdata->hdev, "Output queue is full");
-       }
-
-       spin_unlock_irqrestore(&wdata->qlock, flags);
-}
-
-/*
- * This sets the rumble bit on the given output report if rumble is
- * currently enabled.
- * \cmd1 must point to the second byte in the output report => &cmd[1]
- * This must be called on nearly every output report before passing it
- * into the output queue!
- */
-static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
-{
-       if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
-               *cmd1 |= 0x01;
-}
-
-static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
-{
-       __u8 cmd[2];
-
-       rumble = !!rumble;
-       if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
-               return;
-
-       if (rumble)
-               wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
-       else
-               wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
-
-       cmd[0] = WIIPROTO_REQ_RUMBLE;
-       cmd[1] = 0;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
-{
-       __u8 cmd[2];
-
-       leds &= WIIPROTO_FLAGS_LEDS;
-       if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds)
-               return;
-       wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds;
-
-       cmd[0] = WIIPROTO_REQ_LED;
-       cmd[1] = 0;
-
-       if (leds & WIIPROTO_FLAG_LED1)
-               cmd[1] |= 0x10;
-       if (leds & WIIPROTO_FLAG_LED2)
-               cmd[1] |= 0x20;
-       if (leds & WIIPROTO_FLAG_LED3)
-               cmd[1] |= 0x40;
-       if (leds & WIIPROTO_FLAG_LED4)
-               cmd[1] |= 0x80;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-/*
- * Check what peripherals of the wiimote are currently
- * active and select a proper DRM that supports all of
- * the requested data inputs.
- */
-static __u8 select_drm(struct wiimote_data *wdata)
-{
-       __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
-
-       if (ir == WIIPROTO_FLAG_IR_BASIC) {
-               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
-                       return WIIPROTO_REQ_DRM_KAIE;
-               else
-                       return WIIPROTO_REQ_DRM_KIE;
-       } else if (ir == WIIPROTO_FLAG_IR_EXT) {
-               return WIIPROTO_REQ_DRM_KAI;
-       } else if (ir == WIIPROTO_FLAG_IR_FULL) {
-               return WIIPROTO_REQ_DRM_SKAI1;
-       } else {
-               if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
-                       return WIIPROTO_REQ_DRM_KA;
-               else
-                       return WIIPROTO_REQ_DRM_K;
-       }
-}
-
-static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
-{
-       __u8 cmd[3];
-
-       if (drm == WIIPROTO_REQ_NULL)
-               drm = select_drm(wdata);
-
-       cmd[0] = WIIPROTO_REQ_DRM;
-       cmd[1] = 0;
-       cmd[2] = drm;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-static void wiiproto_req_status(struct wiimote_data *wdata)
-{
-       __u8 cmd[2];
-
-       cmd[0] = WIIPROTO_REQ_SREQ;
-       cmd[1] = 0;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
-{
-       accel = !!accel;
-       if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
-               return;
-
-       if (accel)
-               wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
-       else
-               wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
-
-       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
-}
-
-static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
-{
-       __u8 cmd[2];
-
-       cmd[0] = WIIPROTO_REQ_IR1;
-       cmd[1] = flags;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
-{
-       __u8 cmd[2];
-
-       cmd[0] = WIIPROTO_REQ_IR2;
-       cmd[1] = flags;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-#define wiiproto_req_wreg(wdata, os, buf, sz) \
-                       wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
-
-#define wiiproto_req_weeprom(wdata, os, buf, sz) \
-                       wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
-
-static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
-                               __u32 offset, const __u8 *buf, __u8 size)
-{
-       __u8 cmd[22];
-
-       if (size > 16 || size == 0) {
-               hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
-               return;
-       }
-
-       memset(cmd, 0, sizeof(cmd));
-       cmd[0] = WIIPROTO_REQ_WMEM;
-       cmd[2] = (offset >> 16) & 0xff;
-       cmd[3] = (offset >> 8) & 0xff;
-       cmd[4] = offset & 0xff;
-       cmd[5] = size;
-       memcpy(&cmd[6], buf, size);
-
-       if (!eeprom)
-               cmd[1] |= 0x04;
-
-       wiiproto_keep_rumble(wdata, &cmd[1]);
-       wiimote_queue(wdata, cmd, sizeof(cmd));
-}
-
-/* requries the cmd-mutex to be held */
-static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
-                                               const __u8 *wmem, __u8 size)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
-       wiiproto_req_wreg(wdata, offset, wmem, size);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       ret = wiimote_cmd_wait(wdata);
-       if (!ret && wdata->state.cmd_err)
-               ret = -EIO;
-
-       return ret;
-}
-
-static int wiimote_battery_get_property(struct power_supply *psy,
-                                               enum power_supply_property psp,
-                                               union power_supply_propval *val)
-{
-       struct wiimote_data *wdata = container_of(psy,
-                                               struct wiimote_data, battery);
-       int ret = 0, state;
-       unsigned long flags;
-
-       ret = wiimote_cmd_acquire(wdata);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
-       wiiproto_req_status(wdata);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       ret = wiimote_cmd_wait(wdata);
-       state = wdata->state.cmd_battery;
-       wiimote_cmd_release(wdata);
-
-       if (ret)
-               return ret;
-
-       switch (psp) {
-               case POWER_SUPPLY_PROP_CAPACITY:
-                       val->intval = state * 100 / 255;
-                       break;
-               default:
-                       ret = -EINVAL;
-                       break;
-       }
-
-       return ret;
-}
-
-static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
-{
-       int ret;
-       unsigned long flags;
-       __u8 format = 0;
-       static const __u8 data_enable[] = { 0x01 };
-       static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
-                                               0x00, 0xaa, 0x00, 0x64 };
-       static const __u8 data_sens2[] = { 0x63, 0x03 };
-       static const __u8 data_fin[] = { 0x08 };
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-
-       if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
-               spin_unlock_irqrestore(&wdata->state.lock, flags);
-               return 0;
-       }
-
-       if (mode == 0) {
-               wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
-               wiiproto_req_ir1(wdata, 0);
-               wiiproto_req_ir2(wdata, 0);
-               wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
-               spin_unlock_irqrestore(&wdata->state.lock, flags);
-               return 0;
-       }
-
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       ret = wiimote_cmd_acquire(wdata);
-       if (ret)
-               return ret;
-
-       /* send PIXEL CLOCK ENABLE cmd first */
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
-       wiiproto_req_ir1(wdata, 0x06);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       ret = wiimote_cmd_wait(wdata);
-       if (ret)
-               goto unlock;
-       if (wdata->state.cmd_err) {
-               ret = -EIO;
-               goto unlock;
-       }
-
-       /* enable IR LOGIC */
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
-       wiiproto_req_ir2(wdata, 0x06);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       ret = wiimote_cmd_wait(wdata);
-       if (ret)
-               goto unlock;
-       if (wdata->state.cmd_err) {
-               ret = -EIO;
-               goto unlock;
-       }
-
-       /* enable IR cam but do not make it send data, yet */
-       ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
-                                                       sizeof(data_enable));
-       if (ret)
-               goto unlock;
-
-       /* write first sensitivity block */
-       ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
-                                                       sizeof(data_sens1));
-       if (ret)
-               goto unlock;
-
-       /* write second sensitivity block */
-       ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
-                                                       sizeof(data_sens2));
-       if (ret)
-               goto unlock;
-
-       /* put IR cam into desired state */
-       switch (mode) {
-               case WIIPROTO_FLAG_IR_FULL:
-                       format = 5;
-                       break;
-               case WIIPROTO_FLAG_IR_EXT:
-                       format = 3;
-                       break;
-               case WIIPROTO_FLAG_IR_BASIC:
-                       format = 1;
-                       break;
-       }
-       ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
-       if (ret)
-               goto unlock;
-
-       /* make IR cam send data */
-       ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
-       if (ret)
-               goto unlock;
-
-       /* request new DRM mode compatible to IR mode */
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
-       wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
-       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-unlock:
-       wiimote_cmd_release(wdata);
-       return ret;
-}
-
-static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
-{
-       struct wiimote_data *wdata;
-       struct device *dev = led_dev->dev->parent;
-       int i;
-       unsigned long flags;
-       bool value = false;
-
-       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
-
-       for (i = 0; i < 4; ++i) {
-               if (wdata->leds[i] == led_dev) {
-                       spin_lock_irqsave(&wdata->state.lock, flags);
-                       value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
-                       spin_unlock_irqrestore(&wdata->state.lock, flags);
-                       break;
-               }
-       }
-
-       return value ? LED_FULL : LED_OFF;
-}
-
-static void wiimote_leds_set(struct led_classdev *led_dev,
-                                               enum led_brightness value)
-{
-       struct wiimote_data *wdata;
-       struct device *dev = led_dev->dev->parent;
-       int i;
-       unsigned long flags;
-       __u8 state, flag;
-
-       wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
-
-       for (i = 0; i < 4; ++i) {
-               if (wdata->leds[i] == led_dev) {
-                       flag = WIIPROTO_FLAG_LED(i + 1);
-                       spin_lock_irqsave(&wdata->state.lock, flags);
-                       state = wdata->state.flags;
-                       if (value == LED_OFF)
-                               wiiproto_req_leds(wdata, state & ~flag);
-                       else
-                               wiiproto_req_leds(wdata, state | flag);
-                       spin_unlock_irqrestore(&wdata->state.lock, flags);
-                       break;
-               }
-       }
-}
-
-static int wiimote_ff_play(struct input_dev *dev, void *data,
-                                                       struct ff_effect *eff)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-       __u8 value;
-       unsigned long flags;
-
-       /*
-        * The wiimote supports only a single rumble motor so if any magnitude
-        * is set to non-zero then we start the rumble motor. If both are set to
-        * zero, we stop the rumble motor.
-        */
-
-       if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
-               value = 1;
-       else
-               value = 0;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_rumble(wdata, value);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       return 0;
-}
-
-static int wiimote_input_open(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-
-       return hid_hw_open(wdata->hdev);
-}
-
-static void wiimote_input_close(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-
-       hid_hw_close(wdata->hdev);
-}
-
-static int wiimote_accel_open(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-       int ret;
-       unsigned long flags;
-
-       ret = hid_hw_open(wdata->hdev);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_accel(wdata, true);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       return 0;
-}
-
-static void wiimote_accel_close(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_accel(wdata, false);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       hid_hw_close(wdata->hdev);
-}
-
-static int wiimote_ir_open(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-       int ret;
-
-       ret = hid_hw_open(wdata->hdev);
-       if (ret)
-               return ret;
-
-       ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
-       if (ret) {
-               hid_hw_close(wdata->hdev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void wiimote_ir_close(struct input_dev *dev)
-{
-       struct wiimote_data *wdata = input_get_drvdata(dev);
-
-       wiimote_init_ir(wdata, 0);
-       hid_hw_close(wdata->hdev);
-}
-
-static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
-{
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
-                                                       !!(payload[0] & 0x01));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_RIGHT],
-                                                       !!(payload[0] & 0x02));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_DOWN],
-                                                       !!(payload[0] & 0x04));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_UP],
-                                                       !!(payload[0] & 0x08));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_PLUS],
-                                                       !!(payload[0] & 0x10));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_TWO],
-                                                       !!(payload[1] & 0x01));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_ONE],
-                                                       !!(payload[1] & 0x02));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_B],
-                                                       !!(payload[1] & 0x04));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_A],
-                                                       !!(payload[1] & 0x08));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_MINUS],
-                                                       !!(payload[1] & 0x10));
-       input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_HOME],
-                                                       !!(payload[1] & 0x80));
-       input_sync(wdata->input);
-}
-
-static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
-{
-       __u16 x, y, z;
-
-       if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
-               return;
-
-       /*
-        * payload is: BB BB XX YY ZZ
-        * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ
-        * contain the upper 8 bits of each value. The lower 2 bits are
-        * contained in the buttons data BB BB.
-        * Bits 6 and 7 of the first buttons byte BB is the lower 2 bits of the
-        * X accel value. Bit 5 of the second buttons byte is the 2nd bit of Y
-        * accel value and bit 6 is the second bit of the Z value.
-        * The first bit of Y and Z values is not available and always set to 0.
-        * 0x200 is returned on no movement.
-        */
-
-       x = payload[2] << 2;
-       y = payload[3] << 2;
-       z = payload[4] << 2;
-
-       x |= (payload[0] >> 5) & 0x3;
-       y |= (payload[1] >> 4) & 0x2;
-       z |= (payload[1] >> 5) & 0x2;
-
-       input_report_abs(wdata->accel, ABS_RX, x - 0x200);
-       input_report_abs(wdata->accel, ABS_RY, y - 0x200);
-       input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
-       input_sync(wdata->accel);
-}
-
-#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
-                                                       ABS_HAT0X, ABS_HAT0Y)
-#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
-                                                       ABS_HAT1X, ABS_HAT1Y)
-#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
-                                                       ABS_HAT2X, ABS_HAT2Y)
-#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
-                                                       ABS_HAT3X, ABS_HAT3Y)
-
-static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
-                                               bool packed, __u8 xid, __u8 yid)
-{
-       __u16 x, y;
-
-       if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
-               return;
-
-       /*
-        * Basic IR data is encoded into 3 bytes. The first two bytes are the
-        * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits
-        * of both.
-        * If data is packed, then the 3rd byte is put first and slightly
-        * reordered. This allows to interleave packed and non-packed data to
-        * have two IR sets in 5 bytes instead of 6.
-        * The resulting 10bit X/Y values are passed to the ABS_HATXY input dev.
-        */
-
-       if (packed) {
-               x = ir[1] << 2;
-               y = ir[2] << 2;
-
-               x |= ir[0] & 0x3;
-               y |= (ir[0] >> 2) & 0x3;
-       } else {
-               x = ir[0] << 2;
-               y = ir[1] << 2;
-
-               x |= (ir[2] >> 4) & 0x3;
-               y |= (ir[2] >> 6) & 0x3;
-       }
-
-       input_report_abs(wdata->ir, xid, x);
-       input_report_abs(wdata->ir, yid, y);
-}
-
-static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-
-       /* on status reports the drm is reset so we need to resend the drm */
-       wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
-
-       if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
-               wdata->state.cmd_battery = payload[5];
-               wiimote_cmd_complete(wdata);
-       }
-}
-
-static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-}
-
-static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
-{
-       __u8 err = payload[3];
-       __u8 cmd = payload[2];
-
-       handler_keys(wdata, payload);
-
-       if (wiimote_cmd_pending(wdata, cmd, 0)) {
-               wdata->state.cmd_err = err;
-               wiimote_cmd_complete(wdata);
-       } else if (err) {
-               hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
-                                                                       cmd);
-       }
-}
-
-static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-       handler_accel(wdata, payload);
-}
-
-static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-}
-
-static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-       handler_accel(wdata, payload);
-       ir_to_input0(wdata, &payload[5], false);
-       ir_to_input1(wdata, &payload[8], false);
-       ir_to_input2(wdata, &payload[11], false);
-       ir_to_input3(wdata, &payload[14], false);
-       input_sync(wdata->ir);
-}
-
-static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-}
-
-static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-       ir_to_input0(wdata, &payload[2], false);
-       ir_to_input1(wdata, &payload[4], true);
-       ir_to_input2(wdata, &payload[7], false);
-       ir_to_input3(wdata, &payload[9], true);
-       input_sync(wdata->ir);
-}
-
-static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-       handler_accel(wdata, payload);
-}
-
-static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-       handler_accel(wdata, payload);
-       ir_to_input0(wdata, &payload[5], false);
-       ir_to_input1(wdata, &payload[7], true);
-       ir_to_input2(wdata, &payload[10], false);
-       ir_to_input3(wdata, &payload[12], true);
-       input_sync(wdata->ir);
-}
-
-static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
-{
-}
-
-static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
-{
-       handler_keys(wdata, payload);
-
-       wdata->state.accel_split[0] = payload[2];
-       wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
-       wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
-
-       ir_to_input0(wdata, &payload[3], false);
-       ir_to_input1(wdata, &payload[12], false);
-       input_sync(wdata->ir);
-}
-
-static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
-{
-       __u8 buf[5];
-
-       handler_keys(wdata, payload);
-
-       wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
-       wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
-
-       buf[0] = 0;
-       buf[1] = 0;
-       buf[2] = wdata->state.accel_split[0];
-       buf[3] = payload[2];
-       buf[4] = wdata->state.accel_split[1];
-       handler_accel(wdata, buf);
-
-       ir_to_input2(wdata, &payload[3], false);
-       ir_to_input3(wdata, &payload[12], false);
-       input_sync(wdata->ir);
-}
-
-struct wiiproto_handler {
-       __u8 id;
-       size_t size;
-       void (*func)(struct wiimote_data *wdata, const __u8 *payload);
-};
-
-static struct wiiproto_handler handlers[] = {
-       { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
-       { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
-       { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
-       { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
-       { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
-       { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
-       { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
-       { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
-       { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
-       { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
-       { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
-       { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
-       { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
-       { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
-       { .id = 0 }
-};
-
-static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
-                                                       u8 *raw_data, int size)
-{
-       struct wiimote_data *wdata = hid_get_drvdata(hdev);
-       struct wiiproto_handler *h;
-       int i;
-       unsigned long flags;
-       bool handled = false;
-
-       if (size < 1)
-               return -EINVAL;
-
-       spin_lock_irqsave(&wdata->state.lock, flags);
-
-       for (i = 0; handlers[i].id; ++i) {
-               h = &handlers[i];
-               if (h->id == raw_data[0] && h->size < size) {
-                       h->func(wdata, &raw_data[1]);
-                       handled = true;
-               }
-       }
-
-       if (!handled)
-               hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
-                                                                       size);
-
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       return 0;
-}
-
-static void wiimote_leds_destroy(struct wiimote_data *wdata)
-{
-       int i;
-       struct led_classdev *led;
-
-       for (i = 0; i < 4; ++i) {
-               if (wdata->leds[i]) {
-                       led = wdata->leds[i];
-                       wdata->leds[i] = NULL;
-                       led_classdev_unregister(led);
-                       kfree(led);
-               }
-       }
-}
-
-static int wiimote_leds_create(struct wiimote_data *wdata)
-{
-       int i, ret;
-       struct device *dev = &wdata->hdev->dev;
-       size_t namesz = strlen(dev_name(dev)) + 9;
-       struct led_classdev *led;
-       char *name;
-
-       for (i = 0; i < 4; ++i) {
-               led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
-               if (!led) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-               name = (void*)&led[1];
-               snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
-               led->name = name;
-               led->brightness = 0;
-               led->max_brightness = 1;
-               led->brightness_get = wiimote_leds_get;
-               led->brightness_set = wiimote_leds_set;
-
-               ret = led_classdev_register(dev, led);
-               if (ret) {
-                       kfree(led);
-                       goto err;
-               }
-               wdata->leds[i] = led;
-       }
-
-       return 0;
-
-err:
-       wiimote_leds_destroy(wdata);
-       return ret;
-}
-
-static struct wiimote_data *wiimote_create(struct hid_device *hdev)
-{
-       struct wiimote_data *wdata;
-       int i;
-
-       wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
-       if (!wdata)
-               return NULL;
-
-       wdata->input = input_allocate_device();
-       if (!wdata->input)
-               goto err;
-
-       wdata->hdev = hdev;
-       hid_set_drvdata(hdev, wdata);
-
-       input_set_drvdata(wdata->input, wdata);
-       wdata->input->open = wiimote_input_open;
-       wdata->input->close = wiimote_input_close;
-       wdata->input->dev.parent = &wdata->hdev->dev;
-       wdata->input->id.bustype = wdata->hdev->bus;
-       wdata->input->id.vendor = wdata->hdev->vendor;
-       wdata->input->id.product = wdata->hdev->product;
-       wdata->input->id.version = wdata->hdev->version;
-       wdata->input->name = WIIMOTE_NAME;
-
-       set_bit(EV_KEY, wdata->input->evbit);
-       for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
-               set_bit(wiiproto_keymap[i], wdata->input->keybit);
-
-       set_bit(FF_RUMBLE, wdata->input->ffbit);
-       if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
-               goto err_input;
-
-       wdata->accel = input_allocate_device();
-       if (!wdata->accel)
-               goto err_input;
-
-       input_set_drvdata(wdata->accel, wdata);
-       wdata->accel->open = wiimote_accel_open;
-       wdata->accel->close = wiimote_accel_close;
-       wdata->accel->dev.parent = &wdata->hdev->dev;
-       wdata->accel->id.bustype = wdata->hdev->bus;
-       wdata->accel->id.vendor = wdata->hdev->vendor;
-       wdata->accel->id.product = wdata->hdev->product;
-       wdata->accel->id.version = wdata->hdev->version;
-       wdata->accel->name = WIIMOTE_NAME " Accelerometer";
-
-       set_bit(EV_ABS, wdata->accel->evbit);
-       set_bit(ABS_RX, wdata->accel->absbit);
-       set_bit(ABS_RY, wdata->accel->absbit);
-       set_bit(ABS_RZ, wdata->accel->absbit);
-       input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
-       input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
-       input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
-
-       wdata->ir = input_allocate_device();
-       if (!wdata->ir)
-               goto err_ir;
-
-       input_set_drvdata(wdata->ir, wdata);
-       wdata->ir->open = wiimote_ir_open;
-       wdata->ir->close = wiimote_ir_close;
-       wdata->ir->dev.parent = &wdata->hdev->dev;
-       wdata->ir->id.bustype = wdata->hdev->bus;
-       wdata->ir->id.vendor = wdata->hdev->vendor;
-       wdata->ir->id.product = wdata->hdev->product;
-       wdata->ir->id.version = wdata->hdev->version;
-       wdata->ir->name = WIIMOTE_NAME " IR";
-
-       set_bit(EV_ABS, wdata->ir->evbit);
-       set_bit(ABS_HAT0X, wdata->ir->absbit);
-       set_bit(ABS_HAT0Y, wdata->ir->absbit);
-       set_bit(ABS_HAT1X, wdata->ir->absbit);
-       set_bit(ABS_HAT1Y, wdata->ir->absbit);
-       set_bit(ABS_HAT2X, wdata->ir->absbit);
-       set_bit(ABS_HAT2Y, wdata->ir->absbit);
-       set_bit(ABS_HAT3X, wdata->ir->absbit);
-       set_bit(ABS_HAT3Y, wdata->ir->absbit);
-       input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
-       input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
-
-       spin_lock_init(&wdata->qlock);
-       INIT_WORK(&wdata->worker, wiimote_worker);
-
-       spin_lock_init(&wdata->state.lock);
-       init_completion(&wdata->state.ready);
-       mutex_init(&wdata->state.sync);
-
-       return wdata;
-
-err_ir:
-       input_free_device(wdata->accel);
-err_input:
-       input_free_device(wdata->input);
-err:
-       kfree(wdata);
-       return NULL;
-}
-
-static void wiimote_destroy(struct wiimote_data *wdata)
-{
-       wiimote_leds_destroy(wdata);
-
-       power_supply_unregister(&wdata->battery);
-       input_unregister_device(wdata->accel);
-       input_unregister_device(wdata->ir);
-       input_unregister_device(wdata->input);
-       cancel_work_sync(&wdata->worker);
-       hid_hw_stop(wdata->hdev);
-
-       kfree(wdata);
-}
-
-static int wiimote_hid_probe(struct hid_device *hdev,
-                               const struct hid_device_id *id)
-{
-       struct wiimote_data *wdata;
-       int ret;
-
-       wdata = wiimote_create(hdev);
-       if (!wdata) {
-               hid_err(hdev, "Can't alloc device\n");
-               return -ENOMEM;
-       }
-
-       ret = hid_parse(hdev);
-       if (ret) {
-               hid_err(hdev, "HID parse failed\n");
-               goto err;
-       }
-
-       ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
-       if (ret) {
-               hid_err(hdev, "HW start failed\n");
-               goto err;
-       }
-
-       ret = input_register_device(wdata->accel);
-       if (ret) {
-               hid_err(hdev, "Cannot register input device\n");
-               goto err_stop;
-       }
-
-       ret = input_register_device(wdata->ir);
-       if (ret) {
-               hid_err(hdev, "Cannot register input device\n");
-               goto err_ir;
-       }
-
-       ret = input_register_device(wdata->input);
-       if (ret) {
-               hid_err(hdev, "Cannot register input device\n");
-               goto err_input;
-       }
-
-       wdata->battery.properties = wiimote_battery_props;
-       wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
-       wdata->battery.get_property = wiimote_battery_get_property;
-       wdata->battery.name = "wiimote_battery";
-       wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
-       wdata->battery.use_for_apm = 0;
-
-       ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
-       if (ret) {
-               hid_err(hdev, "Cannot register battery device\n");
-               goto err_battery;
-       }
-
-       ret = wiimote_leds_create(wdata);
-       if (ret)
-               goto err_free;
-
-       hid_info(hdev, "New device registered\n");
-
-       /* by default set led1 after device initialization */
-       spin_lock_irq(&wdata->state.lock);
-       wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1);
-       spin_unlock_irq(&wdata->state.lock);
-
-       return 0;
-
-err_free:
-       wiimote_destroy(wdata);
-       return ret;
-
-err_battery:
-       input_unregister_device(wdata->input);
-       wdata->input = NULL;
-err_input:
-       input_unregister_device(wdata->ir);
-       wdata->ir = NULL;
-err_ir:
-       input_unregister_device(wdata->accel);
-       wdata->accel = NULL;
-err_stop:
-       hid_hw_stop(hdev);
-err:
-       input_free_device(wdata->ir);
-       input_free_device(wdata->accel);
-       input_free_device(wdata->input);
-       kfree(wdata);
-       return ret;
-}
-
-static void wiimote_hid_remove(struct hid_device *hdev)
-{
-       struct wiimote_data *wdata = hid_get_drvdata(hdev);
-
-       hid_info(hdev, "Device removed\n");
-       wiimote_destroy(wdata);
-}
-
-static const struct hid_device_id wiimote_hid_devices[] = {
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
-                               USB_DEVICE_ID_NINTENDO_WIIMOTE) },
-       { }
-};
-MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
-
-static struct hid_driver wiimote_hid_driver = {
-       .name = "wiimote",
-       .id_table = wiimote_hid_devices,
-       .probe = wiimote_hid_probe,
-       .remove = wiimote_hid_remove,
-       .raw_event = wiimote_hid_event,
-};
-
-static int __init wiimote_init(void)
-{
-       int ret;
-
-       ret = hid_register_driver(&wiimote_hid_driver);
-       if (ret)
-               pr_err("Can't register wiimote hid driver\n");
-
-       return ret;
-}
-
-static void __exit wiimote_exit(void)
-{
-       hid_unregister_driver(&wiimote_hid_driver);
-}
-
-module_init(wiimote_init);
-module_exit(wiimote_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
-MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
-MODULE_VERSION(WIIMOTE_VERSION);
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
new file mode 100644 (file)
index 0000000..c81dbeb
--- /dev/null
@@ -0,0 +1,208 @@
+#ifndef __HID_WIIMOTE_H
+#define __HID_WIIMOTE_H
+
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+#define WIIPROTO_FLAG_LED1             0x01
+#define WIIPROTO_FLAG_LED2             0x02
+#define WIIPROTO_FLAG_LED3             0x04
+#define WIIPROTO_FLAG_LED4             0x08
+#define WIIPROTO_FLAG_RUMBLE           0x10
+#define WIIPROTO_FLAG_ACCEL            0x20
+#define WIIPROTO_FLAG_IR_BASIC         0x40
+#define WIIPROTO_FLAG_IR_EXT           0x80
+#define WIIPROTO_FLAG_IR_FULL          0xc0 /* IR_BASIC | IR_EXT */
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+                                       WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+                                                       WIIPROTO_FLAG_IR_FULL)
+
+/* return flag for led \num */
+#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
+
+struct wiimote_buf {
+       __u8 data[HID_MAX_BUFFER_SIZE];
+       size_t size;
+};
+
+struct wiimote_state {
+       spinlock_t lock;
+       __u8 flags;
+       __u8 accel_split[2];
+       __u8 drm;
+
+       /* synchronous cmd requests */
+       struct mutex sync;
+       struct completion ready;
+       int cmd;
+       __u32 opt;
+
+       /* results of synchronous requests */
+       __u8 cmd_battery;
+       __u8 cmd_err;
+       __u8 *cmd_read_buf;
+       __u8 cmd_read_size;
+};
+
+struct wiimote_data {
+       struct hid_device *hdev;
+       struct input_dev *input;
+       struct led_classdev *leds[4];
+       struct input_dev *accel;
+       struct input_dev *ir;
+       struct power_supply battery;
+       struct wiimote_ext *ext;
+       struct wiimote_debug *debug;
+
+       spinlock_t qlock;
+       __u8 head;
+       __u8 tail;
+       struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+       struct work_struct worker;
+
+       struct wiimote_state state;
+};
+
+enum wiiproto_reqs {
+       WIIPROTO_REQ_NULL = 0x0,
+       WIIPROTO_REQ_RUMBLE = 0x10,
+       WIIPROTO_REQ_LED = 0x11,
+       WIIPROTO_REQ_DRM = 0x12,
+       WIIPROTO_REQ_IR1 = 0x13,
+       WIIPROTO_REQ_SREQ = 0x15,
+       WIIPROTO_REQ_WMEM = 0x16,
+       WIIPROTO_REQ_RMEM = 0x17,
+       WIIPROTO_REQ_IR2 = 0x1a,
+       WIIPROTO_REQ_STATUS = 0x20,
+       WIIPROTO_REQ_DATA = 0x21,
+       WIIPROTO_REQ_RETURN = 0x22,
+       WIIPROTO_REQ_DRM_K = 0x30,
+       WIIPROTO_REQ_DRM_KA = 0x31,
+       WIIPROTO_REQ_DRM_KE = 0x32,
+       WIIPROTO_REQ_DRM_KAI = 0x33,
+       WIIPROTO_REQ_DRM_KEE = 0x34,
+       WIIPROTO_REQ_DRM_KAE = 0x35,
+       WIIPROTO_REQ_DRM_KIE = 0x36,
+       WIIPROTO_REQ_DRM_KAIE = 0x37,
+       WIIPROTO_REQ_DRM_E = 0x3d,
+       WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+       WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
+       WIIPROTO_REQ_MAX
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+                                                                       dev))
+
+extern void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm);
+extern int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+                                               const __u8 *wmem, __u8 size);
+extern ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset,
+                                                       __u8 *rmem, __u8 size);
+
+#define wiiproto_req_rreg(wdata, os, sz) \
+                               wiiproto_req_rmem((wdata), false, (os), (sz))
+#define wiiproto_req_reeprom(wdata, os, sz) \
+                               wiiproto_req_rmem((wdata), true, (os), (sz))
+extern void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom,
+                                               __u32 offset, __u16 size);
+
+#ifdef CONFIG_HID_WIIMOTE_EXT
+
+extern int wiiext_init(struct wiimote_data *wdata);
+extern void wiiext_deinit(struct wiimote_data *wdata);
+extern void wiiext_event(struct wiimote_data *wdata, bool plugged);
+extern bool wiiext_active(struct wiimote_data *wdata);
+extern void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload);
+
+#else
+
+static inline int wiiext_init(void *u) { return 0; }
+static inline void wiiext_deinit(void *u) { }
+static inline void wiiext_event(void *u, bool p) { }
+static inline bool wiiext_active(void *u) { return false; }
+static inline void wiiext_handle(void *u, const __u8 *p) { }
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+extern int wiidebug_init(struct wiimote_data *wdata);
+extern void wiidebug_deinit(struct wiimote_data *wdata);
+
+#else
+
+static inline int wiidebug_init(void *u) { return 0; }
+static inline void wiidebug_deinit(void *u) { }
+
+#endif
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+                                                               __u32 opt)
+{
+       return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+       wdata->state.cmd = WIIPROTO_REQ_NULL;
+       complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+       return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+                                                               __u32 opt)
+{
+       INIT_COMPLETION(wdata->state.ready);
+       wdata->state.cmd = cmd;
+       wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+       mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+       int ret;
+
+       ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+       if (ret < 0)
+               return -ERESTARTSYS;
+       else if (ret == 0)
+               return -EIO;
+       else
+               return 0;
+}
+
+#endif
index b403fcef0b8693fbeb3eb44c9d80fd8735757060..5bf91dbad59d4181bdf23fdeda7bd7327117933b 100644 (file)
@@ -197,16 +197,24 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
 {
        struct hid_device *hid = usb_get_intfdata(usbhid->intf);
        int kicked;
+       int r;
 
        if (!hid)
                return 0;
 
        if ((kicked = (usbhid->outhead != usbhid->outtail))) {
                dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+
+               r = usb_autopm_get_interface_async(usbhid->intf);
+               if (r < 0)
+                       return r;
+               /* Asynchronously flush queue. */
+               set_bit(HID_OUT_RUNNING, &usbhid->iofl);
                if (hid_submit_out(hid)) {
                        clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
-                       wake_up(&usbhid->wait);
+                       usb_autopm_put_interface_async(usbhid->intf);
                }
+               wake_up(&usbhid->wait);
        }
        return kicked;
 }
@@ -215,6 +223,7 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
 {
        struct hid_device *hid = usb_get_intfdata(usbhid->intf);
        int kicked;
+       int r;
 
        WARN_ON(hid == NULL);
        if (!hid)
@@ -222,10 +231,17 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
 
        if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
                dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+
+               r = usb_autopm_get_interface_async(usbhid->intf);
+               if (r < 0)
+                       return r;
+               /* Asynchronously flush queue. */
+               set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
                if (hid_submit_ctrl(hid)) {
                        clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
-                       wake_up(&usbhid->wait);
+                       usb_autopm_put_interface_async(usbhid->intf);
                }
+               wake_up(&usbhid->wait);
        }
        return kicked;
 }
@@ -304,30 +320,21 @@ static int hid_submit_out(struct hid_device *hid)
        report = usbhid->out[usbhid->outtail].report;
        raw_report = usbhid->out[usbhid->outtail].raw_report;
 
-       r = usb_autopm_get_interface_async(usbhid->intf);
-       if (r < 0)
-               return -1;
-
-       /*
-        * if the device hasn't been woken, we leave the output
-        * to resume()
-        */
-       if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
-               usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-               usbhid->urbout->dev = hid_to_usb_dev(hid);
-               memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length);
-               kfree(raw_report);
+       usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
+                                                1 + (report->id > 0);
+       usbhid->urbout->dev = hid_to_usb_dev(hid);
+       memcpy(usbhid->outbuf, raw_report,
+              usbhid->urbout->transfer_buffer_length);
+       kfree(raw_report);
 
-               dbg_hid("submitting out urb\n");
+       dbg_hid("submitting out urb\n");
 
-               if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
-                       hid_err(hid, "usb_submit_urb(out) failed\n");
-                       usb_autopm_put_interface_async(usbhid->intf);
-                       return -1;
-               }
-               usbhid->last_out = jiffies;
+       r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC);
+       if (r < 0) {
+               hid_err(hid, "usb_submit_urb(out) failed: %d\n", r);
+               return r;
        }
-
+       usbhid->last_out = jiffies;
        return 0;
 }
 
@@ -343,50 +350,48 @@ static int hid_submit_ctrl(struct hid_device *hid)
        raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
        dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
-       r = usb_autopm_get_interface_async(usbhid->intf);
-       if (r < 0)
-               return -1;
-       if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
-               len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-               if (dir == USB_DIR_OUT) {
-                       usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
-                       usbhid->urbctrl->transfer_buffer_length = len;
-                       memcpy(usbhid->ctrlbuf, raw_report, len);
-                       kfree(raw_report);
-               } else {
-                       int maxpacket, padlen;
-
-                       usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
-                       maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe, 0);
-                       if (maxpacket > 0) {
-                               padlen = DIV_ROUND_UP(len, maxpacket);
-                               padlen *= maxpacket;
-                               if (padlen > usbhid->bufsize)
-                                       padlen = usbhid->bufsize;
-                       } else
-                               padlen = 0;
-                       usbhid->urbctrl->transfer_buffer_length = padlen;
-               }
-               usbhid->urbctrl->dev = hid_to_usb_dev(hid);
-
-               usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
-               usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
-               usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) | report->id);
-               usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
-               usbhid->cr->wLength = cpu_to_le16(len);
-
-               dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
-                       usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
-                       usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
-
-               if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
-                       usb_autopm_put_interface_async(usbhid->intf);
-                       hid_err(hid, "usb_submit_urb(ctrl) failed\n");
-                       return -1;
-               }
-               usbhid->last_ctrl = jiffies;
+       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       if (dir == USB_DIR_OUT) {
+               usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
+               usbhid->urbctrl->transfer_buffer_length = len;
+               memcpy(usbhid->ctrlbuf, raw_report, len);
+               kfree(raw_report);
+       } else {
+               int maxpacket, padlen;
+
+               usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
+               maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
+                                         usbhid->urbctrl->pipe, 0);
+               if (maxpacket > 0) {
+                       padlen = DIV_ROUND_UP(len, maxpacket);
+                       padlen *= maxpacket;
+                       if (padlen > usbhid->bufsize)
+                               padlen = usbhid->bufsize;
+               } else
+                       padlen = 0;
+               usbhid->urbctrl->transfer_buffer_length = padlen;
        }
-
+       usbhid->urbctrl->dev = hid_to_usb_dev(hid);
+
+       usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
+       usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT :
+                                                     HID_REQ_GET_REPORT;
+       usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
+                                        report->id);
+       usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
+       usbhid->cr->wLength = cpu_to_le16(len);
+
+       dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
+               usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
+                                                            "Get_Report",
+               usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
+
+       r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC);
+       if (r < 0) {
+               hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r);
+               return r;
+       }
+       usbhid->last_ctrl = jiffies;
        return 0;
 }
 
@@ -423,11 +428,8 @@ static void hid_irq_out(struct urb *urb)
        else
                usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
 
-       if (usbhid->outhead != usbhid->outtail) {
-               if (hid_submit_out(hid)) {
-                       clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
-                       wake_up(&usbhid->wait);
-               }
+       if (usbhid->outhead != usbhid->outtail && !hid_submit_out(hid)) {
+               /* Successfully submitted next urb in queue */
                spin_unlock_irqrestore(&usbhid->lock, flags);
                return;
        }
@@ -474,13 +476,9 @@ static void hid_ctrl(struct urb *urb)
        else
                usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
 
-       if (usbhid->ctrlhead != usbhid->ctrltail) {
-               if (hid_submit_ctrl(hid)) {
-                       clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
-                       wake_up(&usbhid->wait);
-               }
+       if (usbhid->ctrlhead != usbhid->ctrltail && !hid_submit_ctrl(hid)) {
+               /* Successfully submitted next urb in queue */
                spin_unlock(&usbhid->lock);
-               usb_autopm_put_interface_async(usbhid->intf);
                return;
        }
 
@@ -515,9 +513,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
                usbhid->out[usbhid->outhead].report = report;
                usbhid->outhead = head;
 
+               /* Try to awake from autosuspend... */
+               if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+                       return;
+
+               /*
+                * But if still suspended, leave urb enqueued, don't submit.
+                * Submission will occur if/when resume() drains the queue.
+                */
+               if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+                       return;
+
                if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
-                       if (hid_submit_out(hid))
+                       if (hid_submit_out(hid)) {
                                clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
+                               usb_autopm_put_interface_async(usbhid->intf);
+                       }
+                       wake_up(&usbhid->wait);
                } else {
                        /*
                         * the queue is known to run
@@ -549,9 +561,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
        usbhid->ctrl[usbhid->ctrlhead].dir = dir;
        usbhid->ctrlhead = head;
 
+       /* Try to awake from autosuspend... */
+       if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+               return;
+
+       /*
+        * If already suspended, leave urb enqueued, but don't submit.
+        * Submission will occur if/when resume() drains the queue.
+        */
+       if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+               return;
+
        if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
-               if (hid_submit_ctrl(hid))
+               if (hid_submit_ctrl(hid)) {
                        clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
+                       usb_autopm_put_interface_async(usbhid->intf);
+               }
+               wake_up(&usbhid->wait);
        } else {
                /*
                 * the queue is known to run
@@ -576,6 +602,30 @@ void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, uns
 }
 EXPORT_SYMBOL_GPL(usbhid_submit_report);
 
+/* Workqueue routine to send requests to change LEDs */
+static void hid_led(struct work_struct *work)
+{
+       struct usbhid_device *usbhid =
+               container_of(work, struct usbhid_device, led_work);
+       struct hid_device *hid = usbhid->hid;
+       struct hid_field *field;
+       unsigned long flags;
+
+       field = hidinput_get_led_field(hid);
+       if (!field) {
+               hid_warn(hid, "LED event field not found\n");
+               return;
+       }
+
+       spin_lock_irqsave(&usbhid->lock, flags);
+       if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
+               usbhid->ledcount = hidinput_count_leds(hid);
+               hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
+               __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+       }
+       spin_unlock_irqrestore(&usbhid->lock, flags);
+}
+
 static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
 {
        struct hid_device *hid = input_get_drvdata(dev);
@@ -595,17 +645,15 @@ static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, un
                return -1;
        }
 
+       spin_lock_irqsave(&usbhid->lock, flags);
        hid_set_field(field, offset, value);
-       if (value) {
-               spin_lock_irqsave(&usbhid->lock, flags);
-               usbhid->ledcount++;
-               spin_unlock_irqrestore(&usbhid->lock, flags);
-       } else {
-               spin_lock_irqsave(&usbhid->lock, flags);
-               usbhid->ledcount--;
-               spin_unlock_irqrestore(&usbhid->lock, flags);
-       }
-       usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+       spin_unlock_irqrestore(&usbhid->lock, flags);
+
+       /*
+        * Defer performing requested LED action.
+        * This is more likely gather all LED changes into a single URB.
+        */
+       schedule_work(&usbhid->led_work);
 
        return 0;
 }
@@ -1100,7 +1148,7 @@ static void usbhid_stop(struct hid_device *hid)
                return;
 
        clear_bit(HID_STARTED, &usbhid->iofl);
-       spin_lock_irq(&usbhid->lock);   /* Sync with error handler */
+       spin_lock_irq(&usbhid->lock);   /* Sync with error and led handlers */
        set_bit(HID_DISCONNECTED, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
        usb_kill_urb(usbhid->urbin);
@@ -1234,6 +1282,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
        setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
        spin_lock_init(&usbhid->lock);
 
+       INIT_WORK(&usbhid->led_work, hid_led);
+
        ret = hid_add_device(hid);
        if (ret) {
                if (ret != -ENODEV)
@@ -1266,6 +1316,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
 {
        del_timer_sync(&usbhid->io_retry);
        cancel_work_sync(&usbhid->reset_work);
+       cancel_work_sync(&usbhid->led_work);
 }
 
 static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1367,16 +1418,6 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
                        return -EIO;
        }
 
-       if (!ignoreled && PMSG_IS_AUTO(message)) {
-               spin_lock_irq(&usbhid->lock);
-               if (test_bit(HID_LED_ON, &usbhid->iofl)) {
-                       spin_unlock_irq(&usbhid->lock);
-                       usbhid_mark_busy(usbhid);
-                       return -EBUSY;
-               }
-               spin_unlock_irq(&usbhid->lock);
-       }
-
        hid_cancel_delayed_stuff(usbhid);
        hid_cease_io(usbhid);
 
index 5028d60a22a19b6b69cd1c942728f350d40744b7..c831af937481c66123965743f62aaf5f8a634b3c 100644 (file)
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
+       { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
@@ -67,6 +68,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
index 1673cac93d77bfa22aba124c0a17ce6cc5c05e56..cb8f703efde5a00747b77d1bebc313e17864ae86 100644 (file)
@@ -55,7 +55,6 @@ struct usb_interface *usbhid_find_interface(int minor);
 #define HID_STARTED            8
 #define HID_REPORTED_IDLE      9
 #define HID_KEYS_PRESSED       10
-#define HID_LED_ON             11
 
 /*
  * USB-specific HID struct, to be pointed to
@@ -97,6 +96,8 @@ struct usbhid_device {
        struct work_struct reset_work;                                  /* Task context for resets */
        wait_queue_head_t wait;                                         /* For sleeping */
        int ledcount;                                                   /* counting the number of active leds */
+
+       struct work_struct led_work;                                    /* Task context for setting LEDs */
 };
 
 #define        hid_to_usb_dev(hid_dev) \
index bc445d7e3bf5213a7f4fa9d4b31acbc8c140fef4..796086980f4a622dfe2857eec1d00ff33cdff787 100644 (file)
@@ -64,6 +64,32 @@ static const unsigned char usb_kbd_keycode[256] = {
        150,158,159,128,136,177,178,176,142,152,173,140
 };
 
+
+/**
+ * struct usb_kbd - state of each attached keyboard
+ * @dev:       input device associated with this keyboard
+ * @usbdev:    usb device associated with this keyboard
+ * @old:       data received in the past from the @irq URB representing which
+ *             keys were pressed. By comparing with the current list of keys
+ *             that are pressed, we are able to see key releases.
+ * @irq:       URB for receiving a list of keys that are pressed when a
+ *             new key is pressed or a key that was pressed is released.
+ * @led:       URB for sending LEDs (e.g. numlock, ...)
+ * @newleds:   data that will be sent with the @led URB representing which LEDs
+               should be on
+ * @name:      Name of the keyboard. @dev's name field points to this buffer
+ * @phys:      Physical path of the keyboard. @dev's phys field points to this
+ *             buffer
+ * @new:       Buffer for the @irq URB
+ * @cr:                Control request for @led URB
+ * @leds:      Buffer for the @led URB
+ * @new_dma:   DMA address for @irq URB
+ * @leds_dma:  DMA address for @led URB
+ * @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted
+ * @led_urb_submitted: indicates whether @led is in progress, i.e. it has been
+ *             submitted and its completion handler has not returned yet
+ *             without resubmitting @led
+ */
 struct usb_kbd {
        struct input_dev *dev;
        struct usb_device *usbdev;
@@ -78,6 +104,10 @@ struct usb_kbd {
        unsigned char *leds;
        dma_addr_t new_dma;
        dma_addr_t leds_dma;
+       
+       spinlock_t leds_lock;
+       bool led_urb_submitted;
+
 };
 
 static void usb_kbd_irq(struct urb *urb)
@@ -136,44 +166,66 @@ resubmit:
 static int usb_kbd_event(struct input_dev *dev, unsigned int type,
                         unsigned int code, int value)
 {
+       unsigned long flags;
        struct usb_kbd *kbd = input_get_drvdata(dev);
 
        if (type != EV_LED)
                return -1;
 
+       spin_lock_irqsave(&kbd->leds_lock, flags);
        kbd->newleds = (!!test_bit(LED_KANA,    dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
                       (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL,   dev->led) << 1) |
                       (!!test_bit(LED_NUML,    dev->led));
 
-       if (kbd->led->status == -EINPROGRESS)
+       if (kbd->led_urb_submitted){
+               spin_unlock_irqrestore(&kbd->leds_lock, flags);
                return 0;
+       }
 
-       if (*(kbd->leds) == kbd->newleds)
+       if (*(kbd->leds) == kbd->newleds){
+               spin_unlock_irqrestore(&kbd->leds_lock, flags);
                return 0;
+       }
 
        *(kbd->leds) = kbd->newleds;
+       
        kbd->led->dev = kbd->usbdev;
        if (usb_submit_urb(kbd->led, GFP_ATOMIC))
                pr_err("usb_submit_urb(leds) failed\n");
-
+       else
+               kbd->led_urb_submitted = true;
+       
+       spin_unlock_irqrestore(&kbd->leds_lock, flags);
+       
        return 0;
 }
 
 static void usb_kbd_led(struct urb *urb)
 {
+       unsigned long flags;
        struct usb_kbd *kbd = urb->context;
 
        if (urb->status)
                hid_warn(urb->dev, "led urb status %d received\n",
                         urb->status);
 
-       if (*(kbd->leds) == kbd->newleds)
+       spin_lock_irqsave(&kbd->leds_lock, flags);
+
+       if (*(kbd->leds) == kbd->newleds){
+               kbd->led_urb_submitted = false;
+               spin_unlock_irqrestore(&kbd->leds_lock, flags);
                return;
+       }
 
        *(kbd->leds) = kbd->newleds;
+       
        kbd->led->dev = kbd->usbdev;
-       if (usb_submit_urb(kbd->led, GFP_ATOMIC))
+       if (usb_submit_urb(kbd->led, GFP_ATOMIC)){
                hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
+               kbd->led_urb_submitted = false;
+       }
+       spin_unlock_irqrestore(&kbd->leds_lock, flags);
+       
 }
 
 static int usb_kbd_open(struct input_dev *dev)
@@ -252,6 +304,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
 
        kbd->usbdev = dev;
        kbd->dev = input_dev;
+       spin_lock_init(&kbd->leds_lock);
 
        if (dev->manufacturer)
                strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
@@ -334,6 +387,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
        if (kbd) {
                usb_kill_urb(kbd->irq);
                input_unregister_device(kbd->dev);
+               usb_kill_urb(kbd->led);
                usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
                kfree(kbd);
        }
index fac673940849742b60a7377a57b1cee367c2c22f..93709fbe30eb4c248527fa528bce2ae278807b8e 100644 (file)
@@ -276,8 +276,6 @@ static int puv3_i2c_resume(struct platform_device *dev)
 #define puv3_i2c_resume NULL
 #endif
 
-MODULE_ALIAS("platform:puv3_i2c");
-
 static struct platform_driver puv3_i2c_driver = {
        .probe          = puv3_i2c_probe,
        .remove         = __devexit_p(puv3_i2c_remove),
@@ -289,18 +287,8 @@ static struct platform_driver puv3_i2c_driver = {
        }
 };
 
-static int __init puv3_i2c_init(void)
-{
-       return platform_driver_register(&puv3_i2c_driver);
-}
-
-static void __exit puv3_i2c_exit(void)
-{
-       platform_driver_unregister(&puv3_i2c_driver);
-}
-
-module_init(puv3_i2c_init);
-module_exit(puv3_i2c_exit);
+module_platform_driver(puv3_i2c_driver);
 
 MODULE_DESCRIPTION("PKUnity v3 I2C driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:puv3_i2c");
index 4cf25347b01546b1578e0b9987d6717c38db0366..76457d50bc3493e351c40fbe5b667d1b2e33ff6a 100644 (file)
@@ -369,7 +369,7 @@ static int evdev_fetch_next_event(struct evdev_client *client,
 
        spin_lock_irq(&client->buffer_lock);
 
-       have_event = client->head != client->tail;
+       have_event = client->packet_head != client->tail;
        if (have_event) {
                *event = client->buffer[client->tail++];
                client->tail &= client->bufsize - 1;
@@ -391,14 +391,13 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
        if (count < input_event_size())
                return -EINVAL;
 
-       if (client->packet_head == client->tail && evdev->exist &&
-           (file->f_flags & O_NONBLOCK))
-               return -EAGAIN;
-
-       retval = wait_event_interruptible(evdev->wait,
-               client->packet_head != client->tail || !evdev->exist);
-       if (retval)
-               return retval;
+       if (!(file->f_flags & O_NONBLOCK)) {
+               retval = wait_event_interruptible(evdev->wait,
+                               client->packet_head != client->tail ||
+                               !evdev->exist);
+               if (retval)
+                       return retval;
+       }
 
        if (!evdev->exist)
                return -ENODEV;
@@ -412,6 +411,9 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
                retval += input_event_size();
        }
 
+       if (retval == 0 && (file->f_flags & O_NONBLOCK))
+               return -EAGAIN;
+
        return retval;
 }
 
index 7dfe1009fae09b6101133d8e471a0d4b1ef039cd..7f161d93203c09a7fbb31a6522ed81c00d47ca13 100644 (file)
@@ -84,10 +84,12 @@ static ssize_t input_polldev_set_poll(struct device *dev,
 {
        struct input_polled_dev *polldev = dev_get_drvdata(dev);
        struct input_dev *input = polldev->input;
-       unsigned long interval;
+       unsigned int interval;
+       int err;
 
-       if (strict_strtoul(buf, 0, &interval))
-               return -EINVAL;
+       err = kstrtouint(buf, 0, &interval);
+       if (err)
+               return err;
 
        if (interval < polldev->poll_interval_min)
                return -EINVAL;
index 615c21f2a5539d786ec3188c501558a2084a2b46..cdc385b2cf7d2c2941e87b799f0f3ea50e9e7d5a 100644 (file)
@@ -221,6 +221,22 @@ config KEYBOARD_TCA6416
          To compile this driver as a module, choose M here: the
          module will be called tca6416_keypad.
 
+config KEYBOARD_TCA8418
+       tristate "TCA8418 Keypad Support"
+       depends on I2C
+       help
+         This driver implements basic keypad functionality
+         for keys connected through TCA8418 keypad decoder.
+
+         Say Y here if your device has keys connected to
+         TCA8418 keypad decoder.
+
+         If enabled the complete TCA8418 device will be managed through
+         this driver.
+
+         To compile this driver as a module, choose M here: the
+         module will be called tca8418_keypad.
+
 config KEYBOARD_MATRIX
        tristate "GPIO driven matrix keypad support"
        depends on GENERIC_GPIO
@@ -425,9 +441,10 @@ config KEYBOARD_PMIC8XXX
 
 config KEYBOARD_SAMSUNG
        tristate "Samsung keypad support"
-       depends on SAMSUNG_DEV_KEYPAD
+       depends on HAVE_CLK
        help
-         Say Y here if you want to use the Samsung keypad.
+         Say Y here if you want to use the keypad on your Samsung mobile
+         device.
 
          To compile this driver as a module, choose M here: the
          module will be called samsung-keypad.
index ddde0fd476f744c2aa2cd250362f809c4469310b..df7061f129184f4c5dfb6265229529fe1387dcf7 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_KEYBOARD_EP93XX)         += ep93xx_keypad.o
 obj-$(CONFIG_KEYBOARD_GPIO)            += gpio_keys.o
 obj-$(CONFIG_KEYBOARD_GPIO_POLLED)     += gpio_keys_polled.o
 obj-$(CONFIG_KEYBOARD_TCA6416)         += tca6416-keypad.o
+obj-$(CONFIG_KEYBOARD_TCA8418)         += tca8418_keypad.o
 obj-$(CONFIG_KEYBOARD_HIL)             += hil_kbd.o
 obj-$(CONFIG_KEYBOARD_HIL_OLD)         += hilkbd.o
 obj-$(CONFIG_KEYBOARD_IMX)             += imx_keypad.o
index 3db8006dac3a8f93e1e31a01e56766044cd194ed..e9e8674dfda122ce4773864866f153553bfa45a5 100644 (file)
@@ -202,18 +202,7 @@ static struct platform_driver adp5520_keys_driver = {
        .probe          = adp5520_keys_probe,
        .remove         = __devexit_p(adp5520_keys_remove),
 };
-
-static int __init adp5520_keys_init(void)
-{
-       return platform_driver_register(&adp5520_keys_driver);
-}
-module_init(adp5520_keys_init);
-
-static void __exit adp5520_keys_exit(void)
-{
-       platform_driver_unregister(&adp5520_keys_driver);
-}
-module_exit(adp5520_keys_exit);
+module_platform_driver(adp5520_keys_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Keys ADP5520 Driver");
index 79172af164f25a5ccd913f32225dcd5e60c22760..6df5f6aa7908a9dbf649226cfb1bbc8dee6685cf 100644 (file)
@@ -259,19 +259,6 @@ static struct platform_driver amikbd_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init amikbd_init(void)
-{
-       return platform_driver_probe(&amikbd_driver, amikbd_probe);
-}
-
-module_init(amikbd_init);
-
-static void __exit amikbd_exit(void)
-{
-       platform_driver_unregister(&amikbd_driver);
-}
-
-module_exit(amikbd_exit);
+module_platform_driver(amikbd_driver);
 
 MODULE_ALIAS("platform:amiga-keyboard");
index 19cfc0cf558c99b18875ca0fd0b6aac26134e356..e05a2e7073c6f5424cb4adeaf1ae76728f16ac35 100644 (file)
@@ -1305,7 +1305,7 @@ static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
 static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t count)
 {
        struct input_dev *old_dev, *new_dev;
-       unsigned long value;
+       unsigned int value;
        int err;
        bool old_extra;
        unsigned char old_set;
@@ -1313,7 +1313,11 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
        if (!atkbd->write)
                return -EIO;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        if (atkbd->extra != value) {
@@ -1389,11 +1393,15 @@ static ssize_t atkbd_show_scroll(struct atkbd *atkbd, char *buf)
 static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t count)
 {
        struct input_dev *old_dev, *new_dev;
-       unsigned long value;
+       unsigned int value;
        int err;
        bool old_scroll;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        if (atkbd->scroll != value) {
@@ -1433,7 +1441,7 @@ static ssize_t atkbd_show_set(struct atkbd *atkbd, char *buf)
 static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
 {
        struct input_dev *old_dev, *new_dev;
-       unsigned long value;
+       unsigned int value;
        int err;
        unsigned char old_set;
        bool old_extra;
@@ -1441,7 +1449,11 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
        if (!atkbd->write)
                return -EIO;
 
-       if (strict_strtoul(buf, 10, &value) || (value != 2 && value != 3))
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value != 2 && value != 3)
                return -EINVAL;
 
        if (atkbd->set != value) {
@@ -1484,14 +1496,18 @@ static ssize_t atkbd_show_softrepeat(struct atkbd *atkbd, char *buf)
 static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t count)
 {
        struct input_dev *old_dev, *new_dev;
-       unsigned long value;
+       unsigned int value;
        int err;
        bool old_softrepeat, old_softraw;
 
        if (!atkbd->write)
                return -EIO;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        if (atkbd->softrepeat != value) {
@@ -1534,11 +1550,15 @@ static ssize_t atkbd_show_softraw(struct atkbd *atkbd, char *buf)
 static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t count)
 {
        struct input_dev *old_dev, *new_dev;
-       unsigned long value;
+       unsigned int value;
        int err;
        bool old_softraw;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        if (atkbd->softraw != value) {
index 7d989603f875f6a417c395624cffbc2b7171ac26..8eb9116e0a5f0503de60e89d8213ff855ec8ab60 100644 (file)
@@ -384,7 +384,7 @@ static int bfin_kpad_resume(struct platform_device *pdev)
 # define bfin_kpad_resume  NULL
 #endif
 
-struct platform_driver bfin_kpad_device_driver = {
+static struct platform_driver bfin_kpad_device_driver = {
        .driver         = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
@@ -394,19 +394,7 @@ struct platform_driver bfin_kpad_device_driver = {
        .suspend        = bfin_kpad_suspend,
        .resume         = bfin_kpad_resume,
 };
-
-static int __init bfin_kpad_init(void)
-{
-       return platform_driver_register(&bfin_kpad_device_driver);
-}
-
-static void __exit bfin_kpad_exit(void)
-{
-       platform_driver_unregister(&bfin_kpad_device_driver);
-}
-
-module_init(bfin_kpad_init);
-module_exit(bfin_kpad_exit);
+module_platform_driver(bfin_kpad_device_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
index 9d82b3aeff5e5535b5f85d4a084a410523359564..469825247552588e46615d04d5e4221006ab9186 100644 (file)
@@ -328,18 +328,7 @@ static struct platform_driver davinci_ks_driver = {
        },
        .remove = __devexit_p(davinci_ks_remove),
 };
-
-static int __init davinci_ks_init(void)
-{
-       return platform_driver_probe(&davinci_ks_driver, davinci_ks_probe);
-}
-module_init(davinci_ks_init);
-
-static void __exit davinci_ks_exit(void)
-{
-       platform_driver_unregister(&davinci_ks_driver);
-}
-module_exit(davinci_ks_exit);
+module_platform_driver(davinci_ks_driver);
 
 MODULE_AUTHOR("Miguel Aguilar");
 MODULE_DESCRIPTION("Texas Instruments DaVinci Key Scan Driver");
index 4662c5da801890f63179203029bc9acc241f8314..0ba69f3fcb52b8ca3d47ce3516be6b2f6bbbf167 100644 (file)
@@ -390,19 +390,7 @@ static struct platform_driver ep93xx_keypad_driver = {
        .suspend        = ep93xx_keypad_suspend,
        .resume         = ep93xx_keypad_resume,
 };
-
-static int __init ep93xx_keypad_init(void)
-{
-       return platform_driver_register(&ep93xx_keypad_driver);
-}
-
-static void __exit ep93xx_keypad_exit(void)
-{
-       platform_driver_unregister(&ep93xx_keypad_driver);
-}
-
-module_init(ep93xx_keypad_init);
-module_exit(ep93xx_keypad_exit);
+module_platform_driver(ep93xx_keypad_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
index 4c17aff206576d72468071c4465e1a08e4f36fa4..20c8ab1722148251ebd748def95337fd2b25519e 100644 (file)
@@ -241,19 +241,7 @@ static struct platform_driver gpio_keys_polled_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init gpio_keys_polled_init(void)
-{
-       return platform_driver_register(&gpio_keys_polled_driver);
-}
-
-static void __exit gpio_keys_polled_exit(void)
-{
-       platform_driver_unregister(&gpio_keys_polled_driver);
-}
-
-module_init(gpio_keys_polled_init);
-module_exit(gpio_keys_polled_exit);
+module_platform_driver(gpio_keys_polled_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
index ccebd2d091519e849d68744715187423de46656d..fb87b3bcadb9f95d88c7204353048cc8a64233d9 100644 (file)
@@ -619,19 +619,7 @@ static struct platform_driver imx_keypad_driver = {
        .probe          = imx_keypad_probe,
        .remove         = __devexit_p(imx_keypad_remove),
 };
-
-static int __init imx_keypad_init(void)
-{
-       return platform_driver_register(&imx_keypad_driver);
-}
-
-static void __exit imx_keypad_exit(void)
-{
-       platform_driver_unregister(&imx_keypad_driver);
-}
-
-module_init(imx_keypad_init);
-module_exit(imx_keypad_exit);
+module_platform_driver(imx_keypad_driver);
 
 MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
 MODULE_DESCRIPTION("IMX Keypad Port Driver");
index 7197c5698747ada298c6712058bcaeb438b1018d..24f3ea01c4d5b88ea267f42fe93b70a1b18730bf 100644 (file)
@@ -260,19 +260,7 @@ static struct platform_driver jornada680kbd_driver = {
        .probe  = jornada680kbd_probe,
        .remove = __devexit_p(jornada680kbd_remove),
 };
-
-static int __init jornada680kbd_init(void)
-{
-       return platform_driver_register(&jornada680kbd_driver);
-}
-
-static void __exit jornada680kbd_exit(void)
-{
-       platform_driver_unregister(&jornada680kbd_driver);
-}
-
-module_init(jornada680kbd_init);
-module_exit(jornada680kbd_exit);
+module_platform_driver(jornada680kbd_driver);
 
 MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
 MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver");
index 0aa6740e60d00b6ba43b41929011f1b8138e4056..eeafc30b207ba123293fcf17c9d840277408a297 100644 (file)
@@ -174,16 +174,4 @@ static struct platform_driver jornada720_kbd_driver = {
        .probe   = jornada720_kbd_probe,
        .remove  = __devexit_p(jornada720_kbd_remove),
 };
-
-static int __init jornada720_kbd_init(void)
-{
-       return platform_driver_register(&jornada720_kbd_driver);
-}
-
-static void __exit jornada720_kbd_exit(void)
-{
-       platform_driver_unregister(&jornada720_kbd_driver);
-}
-
-module_init(jornada720_kbd_init);
-module_exit(jornada720_kbd_exit);
+module_platform_driver(jornada720_kbd_driver);
index 82d1dc8badd5472df8ca2cfd6d90c82a06076be7..21823bfc7911b5284426e17568f25a82b56ad1de 100644 (file)
@@ -545,13 +545,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev,
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct lm8323_pwm *pwm = cdev_to_pwm(led_cdev);
-       int ret;
-       unsigned long time;
+       int ret, time;
 
-       ret = strict_strtoul(buf, 10, &time);
+       ret = kstrtoint(buf, 10, &time);
        /* Numbers only, please. */
        if (ret)
-               return -EINVAL;
+               return ret;
 
        pwm->fade_time = time;
 
@@ -613,9 +612,9 @@ static ssize_t lm8323_set_disable(struct device *dev,
 {
        struct lm8323_chip *lm = dev_get_drvdata(dev);
        int ret;
-       unsigned long i;
+       unsigned int i;
 
-       ret = strict_strtoul(buf, 10, &i);
+       ret = kstrtouint(buf, 10, &i);
 
        mutex_lock(&lm->lock);
        lm->kp_enabled = !i;
index e2ae657717ea634d7bc799a46f6dbcc0f0fb6371..9b223d73de326a12f9a7279e15d7a85ad4cd8760 100644 (file)
@@ -496,19 +496,7 @@ static struct platform_driver matrix_keypad_driver = {
 #endif
        },
 };
-
-static int __init matrix_keypad_init(void)
-{
-       return platform_driver_register(&matrix_keypad_driver);
-}
-
-static void __exit matrix_keypad_exit(void)
-{
-       platform_driver_unregister(&matrix_keypad_driver);
-}
-
-module_init(matrix_keypad_init);
-module_exit(matrix_keypad_exit);
+module_platform_driver(matrix_keypad_driver);
 
 MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
 MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
index fcdec5e2b2977269b8718e84bf6da0991b48bb9c..5a71e55c9c54e2c6f532ef70e0cc114b49eeeb67 100644 (file)
@@ -379,7 +379,7 @@ static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
 };
 #endif
 
-struct platform_driver ske_keypad_driver = {
+static struct platform_driver ske_keypad_driver = {
        .driver = {
                .name = "nmk-ske-keypad",
                .owner  = THIS_MODULE,
@@ -390,18 +390,7 @@ struct platform_driver ske_keypad_driver = {
        .probe = ske_keypad_probe,
        .remove = __devexit_p(ske_keypad_remove),
 };
-
-static int __init ske_keypad_init(void)
-{
-       return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
-}
-module_init(ske_keypad_init);
-
-static void __exit ske_keypad_exit(void)
-{
-       platform_driver_unregister(&ske_keypad_driver);
-}
-module_exit(ske_keypad_exit);
+module_platform_driver(ske_keypad_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
index 323bcdfff2484b6d3ddf22fddcb343216c879ee6..6b630d9d3dff13c3651b76a218b32a7e22964754 100644 (file)
@@ -473,20 +473,7 @@ static struct platform_driver omap_kp_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init omap_kp_init(void)
-{
-       printk(KERN_INFO "OMAP Keypad Driver\n");
-       return platform_driver_register(&omap_kp_driver);
-}
-
-static void __exit omap_kp_exit(void)
-{
-       platform_driver_unregister(&omap_kp_driver);
-}
-
-module_init(omap_kp_init);
-module_exit(omap_kp_exit);
+module_platform_driver(omap_kp_driver);
 
 MODULE_AUTHOR("Timo Teräs");
 MODULE_DESCRIPTION("OMAP Keypad Driver");
index c51a3c4a7feb52520246c332d93784a25fc70774..d5c5d77f4b82afd1c9e3369007ae8330bcb5abbf 100644 (file)
@@ -335,18 +335,7 @@ static struct platform_driver omap4_keypad_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init omap4_keypad_init(void)
-{
-       return platform_driver_register(&omap4_keypad_driver);
-}
-module_init(omap4_keypad_init);
-
-static void __exit omap4_keypad_exit(void)
-{
-       platform_driver_unregister(&omap4_keypad_driver);
-}
-module_exit(omap4_keypad_exit);
+module_platform_driver(omap4_keypad_driver);
 
 MODULE_AUTHOR("Texas Instruments");
 MODULE_DESCRIPTION("OMAP4 Keypad Driver");
index 1f1a5563f60a8b7c367acbd68543d998fb50cf8f..abe728c7b88e103db67b55020fb417e73ac048a3 100644 (file)
@@ -163,18 +163,7 @@ static struct platform_driver opencores_kbd_device_driver = {
                .name = "opencores-kbd",
        },
 };
-
-static int __init opencores_kbd_init(void)
-{
-       return platform_driver_register(&opencores_kbd_device_driver);
-}
-module_init(opencores_kbd_init);
-
-static void __exit opencores_kbd_exit(void)
-{
-       platform_driver_unregister(&opencores_kbd_device_driver);
-}
-module_exit(opencores_kbd_exit);
+module_platform_driver(opencores_kbd_device_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
index e7cc51d0fb34adb6834381cffbdc0e9eed94f902..01a1c9f8a383b586d1ea4e18fbb826387a161093 100644 (file)
@@ -780,18 +780,7 @@ static struct platform_driver pmic8xxx_kp_driver = {
                .pm = &pm8xxx_kp_pm_ops,
        },
 };
-
-static int __init pmic8xxx_kp_init(void)
-{
-       return platform_driver_register(&pmic8xxx_kp_driver);
-}
-module_init(pmic8xxx_kp_init);
-
-static void __exit pmic8xxx_kp_exit(void)
-{
-       platform_driver_unregister(&pmic8xxx_kp_driver);
-}
-module_exit(pmic8xxx_kp_exit);
+module_platform_driver(pmic8xxx_kp_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("PMIC8XXX keypad driver");
index eca6ae63de146d8171ac384625c59abea9d8a9af..29fe1b2be1c1f46541fba50815f4bb8d349e8847 100644 (file)
@@ -602,19 +602,7 @@ static struct platform_driver pxa27x_keypad_driver = {
 #endif
        },
 };
-
-static int __init pxa27x_keypad_init(void)
-{
-       return platform_driver_register(&pxa27x_keypad_driver);
-}
-
-static void __exit pxa27x_keypad_exit(void)
-{
-       platform_driver_unregister(&pxa27x_keypad_driver);
-}
-
-module_init(pxa27x_keypad_init);
-module_exit(pxa27x_keypad_exit);
+module_platform_driver(pxa27x_keypad_driver);
 
 MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
 MODULE_LICENSE("GPL");
index 35451bf780c74483e915b87342ef1b7c88fd28dd..d7f1134b789e666253940beed396bcb7d41001e4 100644 (file)
@@ -195,18 +195,7 @@ static struct platform_driver pxa930_rotary_driver = {
        .probe          = pxa930_rotary_probe,
        .remove         = __devexit_p(pxa930_rotary_remove),
 };
-
-static int __init pxa930_rotary_init(void)
-{
-       return platform_driver_register(&pxa930_rotary_driver);
-}
-module_init(pxa930_rotary_init);
-
-static void __exit pxa930_rotary_exit(void)
-{
-       platform_driver_unregister(&pxa930_rotary_driver);
-}
-module_exit(pxa930_rotary_exit);
+module_platform_driver(pxa930_rotary_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
index 8a0060cd398277d0fe209fc8c3e7227f14d386e1..17ba7f9f80f37364d8024754971ad6631322b04b 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/sched.h>
-#include <plat/keypad.h>
+#include <linux/input/samsung-keypad.h>
 
 #define SAMSUNG_KEYIFCON                       0x00
 #define SAMSUNG_KEYIFSTSCLR                    0x04
@@ -65,10 +67,12 @@ enum samsung_keypad_type {
 
 struct samsung_keypad {
        struct input_dev *input_dev;
+       struct platform_device *pdev;
        struct clk *clk;
        void __iomem *base;
        wait_queue_head_t wait;
        bool stopped;
+       bool wake_enabled;
        int irq;
        enum samsung_keypad_type type;
        unsigned int row_shift;
@@ -155,6 +159,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
        unsigned int val;
        bool key_down;
 
+       pm_runtime_get_sync(&keypad->pdev->dev);
+
        do {
                val = readl(keypad->base + SAMSUNG_KEYIFSTSCLR);
                /* Clear interrupt. */
@@ -169,6 +175,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
 
        } while (key_down && !keypad->stopped);
 
+       pm_runtime_put_sync(&keypad->pdev->dev);
+
        return IRQ_HANDLED;
 }
 
@@ -176,6 +184,8 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
 {
        unsigned int val;
 
+       pm_runtime_get_sync(&keypad->pdev->dev);
+
        /* Tell IRQ thread that it may poll the device. */
        keypad->stopped = false;
 
@@ -188,12 +198,16 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
 
        /* KEYIFCOL reg clear. */
        writel(0, keypad->base + SAMSUNG_KEYIFCOL);
+
+       pm_runtime_put_sync(&keypad->pdev->dev);
 }
 
 static void samsung_keypad_stop(struct samsung_keypad *keypad)
 {
        unsigned int val;
 
+       pm_runtime_get_sync(&keypad->pdev->dev);
+
        /* Signal IRQ thread to stop polling and disable the handler. */
        keypad->stopped = true;
        wake_up(&keypad->wait);
@@ -214,6 +228,8 @@ static void samsung_keypad_stop(struct samsung_keypad *keypad)
         * re-enable the handler.
         */
        enable_irq(keypad->irq);
+
+       pm_runtime_put_sync(&keypad->pdev->dev);
 }
 
 static int samsung_keypad_open(struct input_dev *input_dev)
@@ -418,9 +434,11 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
        }
 
        keypad->input_dev = input_dev;
+       keypad->pdev = pdev;
        keypad->row_shift = row_shift;
        keypad->rows = pdata->rows;
        keypad->cols = pdata->cols;
+       keypad->stopped = true;
        init_waitqueue_head(&keypad->wait);
 
        if (pdev->dev.of_node) {
@@ -467,13 +485,14 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
+       device_init_wakeup(&pdev->dev, pdata->wakeup);
+       platform_set_drvdata(pdev, keypad);
+       pm_runtime_enable(&pdev->dev);
+
        error = input_register_device(keypad->input_dev);
        if (error)
                goto err_free_irq;
 
-       device_init_wakeup(&pdev->dev, pdata->wakeup);
-       platform_set_drvdata(pdev, keypad);
-
        if (pdev->dev.of_node) {
                devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
                devm_kfree(&pdev->dev, (void *)pdata->keymap_data);
@@ -483,6 +502,9 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
 
 err_free_irq:
        free_irq(keypad->irq, keypad);
+       pm_runtime_disable(&pdev->dev);
+       device_init_wakeup(&pdev->dev, 0);
+       platform_set_drvdata(pdev, NULL);
 err_put_clk:
        clk_put(keypad->clk);
        samsung_keypad_dt_gpio_free(keypad);
@@ -499,6 +521,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
 {
        struct samsung_keypad *keypad = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
        device_init_wakeup(&pdev->dev, 0);
        platform_set_drvdata(pdev, NULL);
 
@@ -519,11 +542,57 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_RUNTIME
+static int samsung_keypad_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+       unsigned int val;
+       int error;
+
+       if (keypad->stopped)
+               return 0;
+
+       /* This may fail on some SoCs due to lack of controller support */
+       error = enable_irq_wake(keypad->irq);
+       if (!error)
+               keypad->wake_enabled = true;
+
+       val = readl(keypad->base + SAMSUNG_KEYIFCON);
+       val |= SAMSUNG_KEYIFCON_WAKEUPEN;
+       writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+       clk_disable(keypad->clk);
+
+       return 0;
+}
+
+static int samsung_keypad_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+       unsigned int val;
+
+       if (keypad->stopped)
+               return 0;
+
+       clk_enable(keypad->clk);
+
+       val = readl(keypad->base + SAMSUNG_KEYIFCON);
+       val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
+       writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+       if (keypad->wake_enabled)
+               disable_irq_wake(keypad->irq);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
 static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
                                         bool enable)
 {
-       struct device *dev = keypad->input_dev->dev.parent;
        unsigned int val;
 
        clk_enable(keypad->clk);
@@ -531,11 +600,11 @@ static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
        val = readl(keypad->base + SAMSUNG_KEYIFCON);
        if (enable) {
                val |= SAMSUNG_KEYIFCON_WAKEUPEN;
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(&keypad->pdev->dev))
                        enable_irq_wake(keypad->irq);
        } else {
                val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(&keypad->pdev->dev))
                        disable_irq_wake(keypad->irq);
        }
        writel(val, keypad->base + SAMSUNG_KEYIFCON);
@@ -578,12 +647,13 @@ static int samsung_keypad_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops samsung_keypad_pm_ops = {
-       .suspend        = samsung_keypad_suspend,
-       .resume         = samsung_keypad_resume,
+       SET_SYSTEM_SLEEP_PM_OPS(samsung_keypad_suspend, samsung_keypad_resume)
+       SET_RUNTIME_PM_OPS(samsung_keypad_runtime_suspend,
+                          samsung_keypad_runtime_resume, NULL)
 };
-#endif
 
 #ifdef CONFIG_OF
 static const struct of_device_id samsung_keypad_dt_match[] = {
@@ -615,27 +685,13 @@ static struct platform_driver samsung_keypad_driver = {
                .name   = "samsung-keypad",
                .owner  = THIS_MODULE,
                .of_match_table = samsung_keypad_dt_match,
-#ifdef CONFIG_PM
                .pm     = &samsung_keypad_pm_ops,
-#endif
        },
        .id_table       = samsung_keypad_driver_ids,
 };
-
-static int __init samsung_keypad_init(void)
-{
-       return platform_driver_register(&samsung_keypad_driver);
-}
-module_init(samsung_keypad_init);
-
-static void __exit samsung_keypad_exit(void)
-{
-       platform_driver_unregister(&samsung_keypad_driver);
-}
-module_exit(samsung_keypad_exit);
+module_platform_driver(samsung_keypad_driver);
 
 MODULE_DESCRIPTION("Samsung keypad driver");
 MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
 MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-keypad");
index 934aeb583b3035205e8c141573cbce685c812db9..da54ad5db154bf8065549779dfba335ef9088d66 100644 (file)
@@ -337,19 +337,7 @@ static struct platform_driver sh_keysc_device_driver = {
                .pm     = &sh_keysc_dev_pm_ops,
        }
 };
-
-static int __init sh_keysc_init(void)
-{
-       return platform_driver_register(&sh_keysc_device_driver);
-}
-
-static void __exit sh_keysc_exit(void)
-{
-       platform_driver_unregister(&sh_keysc_device_driver);
-}
-
-module_init(sh_keysc_init);
-module_exit(sh_keysc_exit);
+module_platform_driver(sh_keysc_device_driver);
 
 MODULE_AUTHOR("Magnus Damm");
 MODULE_DESCRIPTION("SuperH KEYSC Keypad Driver");
index d712dffd215712ee9bb419d7df062d066dcf1eac..c88bd63dc9ccc0f582fde988ce129609b7477556 100644 (file)
@@ -326,18 +326,7 @@ static struct platform_driver spear_kbd_driver = {
 #endif
        },
 };
-
-static int __init spear_kbd_init(void)
-{
-       return platform_driver_register(&spear_kbd_driver);
-}
-module_init(spear_kbd_init);
-
-static void __exit spear_kbd_exit(void)
-{
-       platform_driver_unregister(&spear_kbd_driver);
-}
-module_exit(spear_kbd_exit);
+module_platform_driver(spear_kbd_driver);
 
 MODULE_AUTHOR("Rajeev Kumar");
 MODULE_DESCRIPTION("SPEAr Keyboard Driver");
index ab7610ca10ebebd92817b641e0d4e77b1ddd2267..9397cf9c625cc8fb68514f9d9263b4385773dd71 100644 (file)
@@ -368,18 +368,7 @@ static struct platform_driver stmpe_keypad_driver = {
        .probe          = stmpe_keypad_probe,
        .remove         = __devexit_p(stmpe_keypad_remove),
 };
-
-static int __init stmpe_keypad_init(void)
-{
-       return platform_driver_register(&stmpe_keypad_driver);
-}
-module_init(stmpe_keypad_init);
-
-static void __exit stmpe_keypad_exit(void)
-{
-       platform_driver_unregister(&stmpe_keypad_driver);
-}
-module_exit(stmpe_keypad_exit);
+module_platform_driver(stmpe_keypad_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPExxxx keypad driver");
index f60c9e82f2043b2ed4d9adbfdccaa5afa76ace36..2dee3e4e7c6f1a8f337ea4739fe69ab0cb7e7b55 100644 (file)
 
 /**
  * struct tc_keypad - data structure used by keypad driver
+ * @tc3589x:    pointer to tc35893
  * @input:      pointer to input device object
  * @board:      keypad platform device
  * @krow:      number of rows
  * @kcol:      number of coloumns
  * @keymap:     matrix scan code table for keycodes
+ * @keypad_stopped: holds keypad status
  */
 struct tc_keypad {
        struct tc3589x *tc3589x;
@@ -453,18 +455,7 @@ static struct platform_driver tc3589x_keypad_driver = {
        .probe  = tc3589x_keypad_probe,
        .remove = __devexit_p(tc3589x_keypad_remove),
 };
-
-static int __init tc3589x_keypad_init(void)
-{
-       return platform_driver_register(&tc3589x_keypad_driver);
-}
-module_init(tc3589x_keypad_init);
-
-static void __exit tc3589x_keypad_exit(void)
-{
-       return platform_driver_unregister(&tc3589x_keypad_driver);
-}
-module_exit(tc3589x_keypad_exit);
+module_platform_driver(tc3589x_keypad_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
new file mode 100644 (file)
index 0000000..958ec10
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Driver for TCA8418 I2C keyboard
+ *
+ * Copyright (C) 2011 Fuel7, Inc.  All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/tca8418_keypad.h>
+
+/* TCA8418 hardware limits */
+#define TCA8418_MAX_ROWS       8
+#define TCA8418_MAX_COLS       10
+
+/* TCA8418 register offsets */
+#define REG_CFG                        0x01
+#define REG_INT_STAT           0x02
+#define REG_KEY_LCK_EC         0x03
+#define REG_KEY_EVENT_A                0x04
+#define REG_KEY_EVENT_B                0x05
+#define REG_KEY_EVENT_C                0x06
+#define REG_KEY_EVENT_D                0x07
+#define REG_KEY_EVENT_E                0x08
+#define REG_KEY_EVENT_F                0x09
+#define REG_KEY_EVENT_G                0x0A
+#define REG_KEY_EVENT_H                0x0B
+#define REG_KEY_EVENT_I                0x0C
+#define REG_KEY_EVENT_J                0x0D
+#define REG_KP_LCK_TIMER       0x0E
+#define REG_UNLOCK1            0x0F
+#define REG_UNLOCK2            0x10
+#define REG_GPIO_INT_STAT1     0x11
+#define REG_GPIO_INT_STAT2     0x12
+#define REG_GPIO_INT_STAT3     0x13
+#define REG_GPIO_DAT_STAT1     0x14
+#define REG_GPIO_DAT_STAT2     0x15
+#define REG_GPIO_DAT_STAT3     0x16
+#define REG_GPIO_DAT_OUT1      0x17
+#define REG_GPIO_DAT_OUT2      0x18
+#define REG_GPIO_DAT_OUT3      0x19
+#define REG_GPIO_INT_EN1       0x1A
+#define REG_GPIO_INT_EN2       0x1B
+#define REG_GPIO_INT_EN3       0x1C
+#define REG_KP_GPIO1           0x1D
+#define REG_KP_GPIO2           0x1E
+#define REG_KP_GPIO3           0x1F
+#define REG_GPI_EM1            0x20
+#define REG_GPI_EM2            0x21
+#define REG_GPI_EM3            0x22
+#define REG_GPIO_DIR1          0x23
+#define REG_GPIO_DIR2          0x24
+#define REG_GPIO_DIR3          0x25
+#define REG_GPIO_INT_LVL1      0x26
+#define REG_GPIO_INT_LVL2      0x27
+#define REG_GPIO_INT_LVL3      0x28
+#define REG_DEBOUNCE_DIS1      0x29
+#define REG_DEBOUNCE_DIS2      0x2A
+#define REG_DEBOUNCE_DIS3      0x2B
+#define REG_GPIO_PULL1         0x2C
+#define REG_GPIO_PULL2         0x2D
+#define REG_GPIO_PULL3         0x2E
+
+/* TCA8418 bit definitions */
+#define CFG_AI                 BIT(7)
+#define CFG_GPI_E_CFG          BIT(6)
+#define CFG_OVR_FLOW_M         BIT(5)
+#define CFG_INT_CFG            BIT(4)
+#define CFG_OVR_FLOW_IEN       BIT(3)
+#define CFG_K_LCK_IEN          BIT(2)
+#define CFG_GPI_IEN            BIT(1)
+#define CFG_KE_IEN             BIT(0)
+
+#define INT_STAT_CAD_INT       BIT(4)
+#define INT_STAT_OVR_FLOW_INT  BIT(3)
+#define INT_STAT_K_LCK_INT     BIT(2)
+#define INT_STAT_GPI_INT       BIT(1)
+#define INT_STAT_K_INT         BIT(0)
+
+/* TCA8418 register masks */
+#define KEY_LCK_EC_KEC         0x7
+#define KEY_EVENT_CODE         0x7f
+#define KEY_EVENT_VALUE                0x80
+
+
+static const struct i2c_device_id tca8418_id[] = {
+       { TCA8418_NAME, 8418, },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+
+struct tca8418_keypad {
+       unsigned int rows;
+       unsigned int cols;
+       unsigned int keypad_mask; /* Mask for keypad col/rol regs */
+       unsigned int irq;
+       unsigned int row_shift;
+
+       struct i2c_client *client;
+       struct input_dev *input;
+
+       /* Flexible array member, must be at end of struct */
+       unsigned short keymap[];
+};
+
+/*
+ * Write a byte to the TCA8418
+ */
+static int tca8418_write_byte(struct tca8418_keypad *keypad_data,
+                             int reg, u8 val)
+{
+       int error;
+
+       error = i2c_smbus_write_byte_data(keypad_data->client, reg, val);
+       if (error < 0) {
+               dev_err(&keypad_data->client->dev,
+                       "%s failed, reg: %d, val: %d, error: %d\n",
+                       __func__, reg, val, error);
+               return error;
+       }
+
+       return 0;
+}
+
+/*
+ * Read a byte from the TCA8418
+ */
+static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
+                            int reg, u8 *val)
+{
+       int error;
+
+       error = i2c_smbus_read_byte_data(keypad_data->client, reg);
+       if (error < 0) {
+               dev_err(&keypad_data->client->dev,
+                               "%s failed, reg: %d, error: %d\n",
+                               __func__, reg, error);
+               return error;
+       }
+
+       *val = (u8)error;
+
+       return 0;
+}
+
+static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+{
+       int error, col, row;
+       u8 reg, state, code;
+
+       /* Initial read of the key event FIFO */
+       error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+
+       /* Assume that key code 0 signifies empty FIFO */
+       while (error >= 0 && reg > 0) {
+               state = reg & KEY_EVENT_VALUE;
+               code  = reg & KEY_EVENT_CODE;
+
+               row = code / TCA8418_MAX_COLS;
+               col = code % TCA8418_MAX_COLS;
+
+               row = (col) ? row : row - 1;
+               col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
+
+               code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+               input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
+               input_report_key(keypad_data->input,
+                               keypad_data->keymap[code], state);
+
+               /* Read for next loop */
+               error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+       }
+
+       if (error < 0)
+               dev_err(&keypad_data->client->dev,
+                       "unable to read REG_KEY_EVENT_A\n");
+
+       input_sync(keypad_data->input);
+}
+
+/*
+ * Threaded IRQ handler and this can (and will) sleep.
+ */
+static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
+{
+       struct tca8418_keypad *keypad_data = dev_id;
+       u8 reg;
+       int error;
+
+       error = tca8418_read_byte(keypad_data, REG_INT_STAT, &reg);
+       if (error) {
+               dev_err(&keypad_data->client->dev,
+                       "unable to read REG_INT_STAT\n");
+               goto exit;
+       }
+
+       if (reg & INT_STAT_OVR_FLOW_INT)
+               dev_warn(&keypad_data->client->dev, "overflow occurred\n");
+
+       if (reg & INT_STAT_K_INT)
+               tca8418_read_keypad(keypad_data);
+
+exit:
+       /* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
+       reg = 0xff;
+       error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
+       if (error)
+               dev_err(&keypad_data->client->dev,
+                       "unable to clear REG_INT_STAT\n");
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Configure the TCA8418 for keypad operation
+ */
+static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+{
+       int reg, error;
+
+       /* Write config register, if this fails assume device not present */
+       error = tca8418_write_byte(keypad_data, REG_CFG,
+                               CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+       if (error < 0)
+               return -ENODEV;
+
+
+       /* Assemble a mask for row and column registers */
+       reg  =  ~(~0 << keypad_data->rows);
+       reg += (~(~0 << keypad_data->cols)) << 8;
+       keypad_data->keypad_mask = reg;
+
+       /* Set registers to keypad mode */
+       error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
+       error |= tca8418_write_byte(keypad_data, REG_KP_GPIO2, reg >> 8);
+       error |= tca8418_write_byte(keypad_data, REG_KP_GPIO3, reg >> 16);
+
+       /* Enable column debouncing */
+       error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS1, reg);
+       error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
+       error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+
+       return error;
+}
+
+static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+                                         const struct i2c_device_id *id)
+{
+       const struct tca8418_keypad_platform_data *pdata =
+                                               client->dev.platform_data;
+       struct tca8418_keypad *keypad_data;
+       struct input_dev *input;
+       int error, row_shift, max_keys;
+
+       /* Copy the platform data */
+       if (!pdata) {
+               dev_dbg(&client->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->keymap_data) {
+               dev_err(&client->dev, "no keymap data defined\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
+               dev_err(&client->dev, "invalid rows\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
+               dev_err(&client->dev, "invalid columns\n");
+               return -EINVAL;
+       }
+
+       /* Check i2c driver capabilities */
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+               dev_err(&client->dev, "%s adapter not supported\n",
+                       dev_driver_string(&client->adapter->dev));
+               return -ENODEV;
+       }
+
+       row_shift = get_count_order(pdata->cols);
+       max_keys = pdata->rows << row_shift;
+
+       /* Allocate memory for keypad_data, keymap and input device */
+       keypad_data = kzalloc(sizeof(*keypad_data) +
+                       max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+       if (!keypad_data)
+               return -ENOMEM;
+
+       keypad_data->rows = pdata->rows;
+       keypad_data->cols = pdata->cols;
+       keypad_data->client = client;
+       keypad_data->row_shift = row_shift;
+
+       /* Initialize the chip or fail if chip isn't present */
+       error = tca8418_configure(keypad_data);
+       if (error < 0)
+               goto fail1;
+
+       /* Configure input device */
+       input = input_allocate_device();
+       if (!input) {
+               error = -ENOMEM;
+               goto fail1;
+       }
+       keypad_data->input = input;
+
+       input->name = client->name;
+       input->dev.parent = &client->dev;
+
+       input->id.bustype = BUS_I2C;
+       input->id.vendor  = 0x0001;
+       input->id.product = 0x001;
+       input->id.version = 0x0001;
+
+       input->keycode     = keypad_data->keymap;
+       input->keycodesize = sizeof(keypad_data->keymap[0]);
+       input->keycodemax  = max_keys;
+
+       __set_bit(EV_KEY, input->evbit);
+       if (pdata->rep)
+               __set_bit(EV_REP, input->evbit);
+
+       input_set_capability(input, EV_MSC, MSC_SCAN);
+
+       input_set_drvdata(input, keypad_data);
+
+       matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+                       input->keycode, input->keybit);
+
+       if (pdata->irq_is_gpio)
+               client->irq = gpio_to_irq(client->irq);
+
+       error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
+                                    IRQF_TRIGGER_FALLING,
+                                    client->name, keypad_data);
+       if (error) {
+               dev_dbg(&client->dev,
+                       "Unable to claim irq %d; error %d\n",
+                       client->irq, error);
+               goto fail2;
+       }
+
+       error = input_register_device(input);
+       if (error) {
+               dev_dbg(&client->dev,
+                       "Unable to register input device, error: %d\n", error);
+               goto fail3;
+       }
+
+       i2c_set_clientdata(client, keypad_data);
+       return 0;
+
+fail3:
+       free_irq(client->irq, keypad_data);
+fail2:
+       input_free_device(input);
+fail1:
+       kfree(keypad_data);
+       return error;
+}
+
+static int __devexit tca8418_keypad_remove(struct i2c_client *client)
+{
+       struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
+
+       free_irq(keypad_data->client->irq, keypad_data);
+
+       input_unregister_device(keypad_data->input);
+
+       kfree(keypad_data);
+
+       return 0;
+}
+
+
+static struct i2c_driver tca8418_keypad_driver = {
+       .driver = {
+               .name   = TCA8418_NAME,
+               .owner  = THIS_MODULE,
+       },
+       .probe          = tca8418_keypad_probe,
+       .remove         = __devexit_p(tca8418_keypad_remove),
+       .id_table       = tca8418_id,
+};
+
+static int __init tca8418_keypad_init(void)
+{
+       return i2c_add_driver(&tca8418_keypad_driver);
+}
+subsys_initcall(tca8418_keypad_init);
+
+static void __exit tca8418_keypad_exit(void)
+{
+       i2c_del_driver(&tca8418_keypad_driver);
+}
+module_exit(tca8418_keypad_exit);
+
+MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
+MODULE_DESCRIPTION("Keypad driver for TCA8418");
+MODULE_LICENSE("GPL");
index cf3228b0ab909a253c48f91a770172ae55acea1c..a136e2e832beff1e740c8c9a79ad850b01fe14c2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <mach/clk.h>
@@ -52,6 +53,7 @@
 /* KBC Interrupt Register */
 #define KBC_INT_0      0x4
 #define KBC_INT_FIFO_CNT_INT_STATUS    (1 << 2)
+#define KBC_INT_KEYPRESS_INT_STATUS    (1 << 0)
 
 #define KBC_ROW_CFG0_0 0x8
 #define KBC_COL_CFG0_0 0x18
@@ -74,15 +76,17 @@ struct tegra_kbc {
        unsigned int cp_to_wkup_dly;
        bool use_fn_map;
        bool use_ghost_filter;
+       bool keypress_caused_wake;
        const struct tegra_kbc_platform_data *pdata;
        unsigned short keycode[KBC_MAX_KEY * 2];
        unsigned short current_keys[KBC_MAX_KPENT];
        unsigned int num_pressed_keys;
+       u32 wakeup_key;
        struct timer_list timer;
        struct clk *clk;
 };
 
-static const u32 tegra_kbc_default_keymap[] = {
+static const u32 tegra_kbc_default_keymap[] __devinitdata = {
        KEY(0, 2, KEY_W),
        KEY(0, 3, KEY_S),
        KEY(0, 4, KEY_A),
@@ -217,7 +221,8 @@ static const u32 tegra_kbc_default_keymap[] = {
        KEY(31, 4, KEY_HELP),
 };
 
-static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+static const
+struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
        .keymap         = tegra_kbc_default_keymap,
        .keymap_size    = ARRAY_SIZE(tegra_kbc_default_keymap),
 };
@@ -409,6 +414,9 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
                 */
                tegra_kbc_set_fifo_interrupt(kbc, false);
                mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+       } else if (val & KBC_INT_KEYPRESS_INT_STATUS) {
+               /* We can be here only through system resume path */
+               kbc->keypress_caused_wake = true;
        }
 
        spin_unlock_irqrestore(&kbc->lock, flags);
@@ -576,6 +584,56 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
        return true;
 }
 
+#ifdef CONFIG_OF
+static struct tegra_kbc_platform_data * __devinit
+tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+{
+       struct tegra_kbc_platform_data *pdata;
+       struct device_node *np = pdev->dev.of_node;
+
+       if (!np)
+               return NULL;
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       if (!of_property_read_u32(np, "debounce-delay", &prop))
+               pdata->debounce_cnt = prop;
+
+       if (!of_property_read_u32(np, "repeat-delay", &prop))
+               pdata->repeat_cnt = prop;
+
+       if (of_find_property(np, "needs-ghost-filter", NULL))
+               pdata->use_ghost_filter = true;
+
+       if (of_find_property(np, "wakeup-source", NULL))
+               pdata->wakeup = true;
+
+       /*
+        * All currently known keymaps with device tree support use the same
+        * pin_cfg, so set it up here.
+        */
+       for (i = 0; i < KBC_MAX_ROW; i++) {
+               pdata->pin_cfg[i].num = i;
+               pdata->pin_cfg[i].is_row = true;
+       }
+
+       for (i = 0; i < KBC_MAX_COL; i++) {
+               pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
+               pdata->pin_cfg[KBC_MAX_ROW + i].is_row = false;
+       }
+
+       return pdata;
+}
+#else
+static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
+       struct platform_device *pdev)
+{
+       return NULL;
+}
+#endif
+
 static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 {
        const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
@@ -590,21 +648,28 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
        unsigned int scan_time_rows;
 
        if (!pdata)
-               return -EINVAL;
+               pdata = tegra_kbc_dt_parse_pdata(pdev);
 
-       if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+       if (!pdata)
                return -EINVAL;
 
+       if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
+               err = -EINVAL;
+               goto err_free_pdata;
+       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "failed to get I/O memory\n");
-               return -ENXIO;
+               err = -ENXIO;
+               goto err_free_pdata;
        }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
-               return -ENXIO;
+               err = -ENXIO;
+               goto err_free_pdata;
        }
 
        kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
@@ -674,9 +739,10 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
        keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
        matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
                                   input_dev->keycode, input_dev->keybit);
+       kbc->wakeup_key = pdata->wakeup_key;
 
-       err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
-                         pdev->name, kbc);
+       err = request_irq(kbc->irq, tegra_kbc_isr,
+                         IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
        if (err) {
                dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
                goto err_put_clk;
@@ -706,6 +772,9 @@ err_free_mem_region:
 err_free_mem:
        input_free_device(input_dev);
        kfree(kbc);
+err_free_pdata:
+       if (!pdev->dev.platform_data)
+               kfree(pdata);
 
        return err;
 }
@@ -715,6 +784,8 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
        struct tegra_kbc *kbc = platform_get_drvdata(pdev);
        struct resource *res;
 
+       platform_set_drvdata(pdev, NULL);
+
        free_irq(kbc->irq, pdev);
        clk_put(kbc->clk);
 
@@ -723,9 +794,14 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
-       kfree(kbc);
+       /*
+        * If we do not have platform data attached to the device we
+        * allocated it ourselves and thus need to free it.
+        */
+       if (!pdev->dev.platform_data)
+               kfree(kbc->pdata);
 
-       platform_set_drvdata(pdev, NULL);
+       kfree(kbc);
 
        return 0;
 }
@@ -754,6 +830,8 @@ static int tegra_kbc_suspend(struct device *dev)
                tegra_kbc_setup_wakekeys(kbc, true);
                msleep(30);
 
+               kbc->keypress_caused_wake = false;
+               enable_irq(kbc->irq);
                enable_irq_wake(kbc->irq);
        } else {
                if (kbc->idev->users)
@@ -780,7 +858,19 @@ static int tegra_kbc_resume(struct device *dev)
 
                tegra_kbc_set_fifo_interrupt(kbc, true);
 
-               enable_irq(kbc->irq);
+               if (kbc->keypress_caused_wake && kbc->wakeup_key) {
+                       /*
+                        * We can't report events directly from the ISR
+                        * because timekeeping is stopped when processing
+                        * wakeup request and we get a nasty warning when
+                        * we try to call do_gettimeofday() in evdev
+                        * handler.
+                        */
+                       input_report_key(kbc->idev, kbc->wakeup_key, 1);
+                       input_sync(kbc->idev);
+                       input_report_key(kbc->idev, kbc->wakeup_key, 0);
+                       input_sync(kbc->idev);
+               }
        } else {
                if (kbc->idev->users)
                        err = tegra_kbc_start(kbc);
@@ -793,6 +883,12 @@ static int tegra_kbc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
 
+static const struct of_device_id tegra_kbc_of_match[] = {
+       { .compatible = "nvidia,tegra20-kbc", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
+
 static struct platform_driver tegra_kbc_driver = {
        .probe          = tegra_kbc_probe,
        .remove         = __devexit_p(tegra_kbc_remove),
@@ -800,20 +896,10 @@ static struct platform_driver tegra_kbc_driver = {
                .name   = "tegra-kbc",
                .owner  = THIS_MODULE,
                .pm     = &tegra_kbc_pm_ops,
+               .of_match_table = tegra_kbc_of_match,
        },
 };
-
-static void __exit tegra_kbc_exit(void)
-{
-       platform_driver_unregister(&tegra_kbc_driver);
-}
-module_exit(tegra_kbc_exit);
-
-static int __init tegra_kbc_init(void)
-{
-       return platform_driver_register(&tegra_kbc_driver);
-}
-module_init(tegra_kbc_init);
+module_platform_driver(tegra_kbc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
index 66e55e5cfdd65d5d47d33616d5d2fff8f1413ea4..fb39c94b6fddeddc336680ede342e8189277ebba 100644 (file)
@@ -322,19 +322,7 @@ static struct platform_driver keypad_driver = {
        .driver.name    = "tnetv107x-keypad",
        .driver.owner   = THIS_MODULE,
 };
-
-static int __init keypad_init(void)
-{
-       return platform_driver_register(&keypad_driver);
-}
-
-static void __exit keypad_exit(void)
-{
-       platform_driver_unregister(&keypad_driver);
-}
-
-module_init(keypad_init);
-module_exit(keypad_exit);
+module_platform_driver(keypad_driver);
 
 MODULE_AUTHOR("Cyril Chemparathy");
 MODULE_DESCRIPTION("TNETV107X Keypad Driver");
index a26922cf0e84d3049d972b7dc0d79b986b3645fb..a588578037ebe7bb05481c221839a54287e6910b 100644 (file)
@@ -460,18 +460,7 @@ static struct platform_driver twl4030_kp_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init twl4030_kp_init(void)
-{
-       return platform_driver_register(&twl4030_kp_driver);
-}
-module_init(twl4030_kp_init);
-
-static void __exit twl4030_kp_exit(void)
-{
-       platform_driver_unregister(&twl4030_kp_driver);
-}
-module_exit(twl4030_kp_exit);
+module_platform_driver(twl4030_kp_driver);
 
 MODULE_AUTHOR("Texas Instruments");
 MODULE_DESCRIPTION("TWL4030 Keypad Driver");
index 318586dadacfb2582ea3317e25b62db7146e5ad7..99bbb7e775ae10ac3566df1ec9a1b23ae1d349ad 100644 (file)
@@ -262,19 +262,7 @@ static struct platform_driver w90p910_keypad_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init w90p910_keypad_init(void)
-{
-       return platform_driver_register(&w90p910_keypad_driver);
-}
-
-static void __exit w90p910_keypad_exit(void)
-{
-       platform_driver_unregister(&w90p910_keypad_driver);
-}
-
-module_init(w90p910_keypad_init);
-module_exit(w90p910_keypad_exit);
+module_platform_driver(w90p910_keypad_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 keypad driver");
index 3dca3c14510e055be3925d9552cd79e181ef91cd..f2e0cbc5ab646c76267731f1a1280041ba50c1ca 100644 (file)
@@ -137,18 +137,7 @@ static struct platform_driver pm860x_onkey_driver = {
        .probe          = pm860x_onkey_probe,
        .remove         = __devexit_p(pm860x_onkey_remove),
 };
-
-static int __init pm860x_onkey_init(void)
-{
-       return platform_driver_register(&pm860x_onkey_driver);
-}
-module_init(pm860x_onkey_init);
-
-static void __exit pm860x_onkey_exit(void)
-{
-       platform_driver_unregister(&pm860x_onkey_driver);
-}
-module_exit(pm860x_onkey_exit);
+module_platform_driver(pm860x_onkey_driver);
 
 MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 22d875fde53a229f9ff8278f396a632534564849..7b46781c30c9cdf10ad3b392e6b730d6c5907566 100644 (file)
@@ -179,6 +179,31 @@ config INPUT_APANEL
         To compile this driver as a module, choose M here: the module will
         be called apanel.
 
+config INPUT_GP2A
+       tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
+       depends on I2C
+       depends on GENERIC_GPIO
+       help
+         Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
+         hooked to an I2C bus.
+
+         To compile this driver as a module, choose M here: the
+         module will be called gp2ap002a00f.
+
+config INPUT_GPIO_TILT_POLLED
+       tristate "Polled GPIO tilt switch"
+       depends on GENERIC_GPIO
+       select INPUT_POLLDEV
+       help
+         This driver implements support for tilt switches connected
+         to GPIO pins that are not capable of generating interrupts.
+
+         The list of gpios to use and the mapping of their states
+         to specific angles is done via platform data.
+
+         To compile this driver as a module, choose M here: the
+         module will be called gpio_tilt_polled.
+
 config INPUT_IXP4XX_BEEPER
        tristate "IXP4XX Beeper support"
        depends on ARCH_IXP4XX
index a244fc6a781cbafebb5043359f5ba1033a1518d2..46671a875b91ba7d932de842e0b55d97862be83c 100644 (file)
@@ -22,6 +22,8 @@ obj-$(CONFIG_INPUT_CMA3000)           += cma3000_d0x.o
 obj-$(CONFIG_INPUT_CMA3000_I2C)                += cma3000_d0x_i2c.o
 obj-$(CONFIG_INPUT_COBALT_BTNS)                += cobalt_btns.o
 obj-$(CONFIG_INPUT_DM355EVM)           += dm355evm_keys.o
+obj-$(CONFIG_INPUT_GP2A)               += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)   += gpio_tilt_polled.o
 obj-$(CONFIG_HP_SDC_RTC)               += hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)      += ixp4xx-beeper.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)     += keyspan_remote.o
index 3d3288a78fdc66d4fcbf71f26f518e19581e45c9..79d9016336355a4885681ca68a2a0249b3b5894e 100644 (file)
@@ -139,18 +139,7 @@ static struct platform_driver ab8500_ponkey_driver = {
        .probe          = ab8500_ponkey_probe,
        .remove         = __devexit_p(ab8500_ponkey_remove),
 };
-
-static int __init ab8500_ponkey_init(void)
-{
-       return platform_driver_register(&ab8500_ponkey_driver);
-}
-module_init(ab8500_ponkey_init);
-
-static void __exit ab8500_ponkey_exit(void)
-{
-       platform_driver_unregister(&ab8500_ponkey_driver);
-}
-module_exit(ab8500_ponkey_exit);
+module_platform_driver(ab8500_ponkey_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
index f29de22fdda054b7fed385ea0abe957e2cda5570..34d401efd4a168f0364bf48f9eafc7a2a3a2f5a9 100644 (file)
@@ -122,7 +122,6 @@ static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
 static struct spi_driver adxl34x_driver = {
        .driver = {
                .name = "adxl34x",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
                .pm = &adxl34x_spi_pm,
        },
index 09244804fb97674adfd9d39b28fc94c05bf651a3..1cf72fe513e6934af7fb4ab0d5742c3155537146 100644 (file)
@@ -452,10 +452,10 @@ static ssize_t adxl34x_disable_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct adxl34x *ac = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -541,10 +541,10 @@ static ssize_t adxl34x_rate_store(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct adxl34x *ac = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned char val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtou8(buf, 10, &val);
        if (error)
                return error;
 
@@ -576,10 +576,10 @@ static ssize_t adxl34x_autosleep_store(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct adxl34x *ac = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -623,13 +623,13 @@ static ssize_t adxl34x_write_store(struct device *dev,
                                   const char *buf, size_t count)
 {
        struct adxl34x *ac = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
        /*
         * This allows basic ADXL register write access for debug purposes.
         */
-       error = strict_strtoul(buf, 16, &val);
+       error = kstrtouint(buf, 16, &val);
        if (error)
                return error;
 
index 874a51c2fbb2821049be81efc57dbff86b6b3525..f63341f20b91aed210208d62bf50c17b9da261c5 100644 (file)
@@ -42,13 +42,13 @@ static int ati_remote2_set_mask(const char *val,
                                const struct kernel_param *kp,
                                unsigned int max)
 {
-       unsigned long mask;
+       unsigned int mask;
        int ret;
 
        if (!val)
                return -EINVAL;
 
-       ret = strict_strtoul(val, 0, &mask);
+       ret = kstrtouint(val, 0, &mask);
        if (ret)
                return ret;
 
@@ -720,11 +720,12 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev,
        struct usb_device *udev = to_usb_device(dev);
        struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
        struct ati_remote2 *ar2 = usb_get_intfdata(intf);
-       unsigned long mask;
+       unsigned int mask;
        int r;
 
-       if (strict_strtoul(buf, 0, &mask))
-               return -EINVAL;
+       r = kstrtouint(buf, 0, &mask);
+       if (r)
+               return r;
 
        if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
                return -EINVAL;
@@ -769,10 +770,12 @@ static ssize_t ati_remote2_store_mode_mask(struct device *dev,
        struct usb_device *udev = to_usb_device(dev);
        struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
        struct ati_remote2 *ar2 = usb_get_intfdata(intf);
-       unsigned long mask;
+       unsigned int mask;
+       int err;
 
-       if (strict_strtoul(buf, 0, &mask))
-               return -EINVAL;
+       err = kstrtouint(buf, 0, &mask);
+       if (err)
+               return err;
 
        if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
                return -EINVAL;
index d00edc9f39d10e38a455579d3a58c8deb96c4216..1c4146fccfdf53d7fd93a5e152998a3fabced2fa 100644 (file)
@@ -264,18 +264,7 @@ static struct platform_driver bfin_rotary_device_driver = {
 #endif
        },
 };
-
-static int __init bfin_rotary_init(void)
-{
-       return platform_driver_register(&bfin_rotary_device_driver);
-}
-module_init(bfin_rotary_init);
-
-static void __exit bfin_rotary_exit(void)
-{
-       platform_driver_unregister(&bfin_rotary_device_driver);
-}
-module_exit(bfin_rotary_exit);
+module_platform_driver(bfin_rotary_device_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
index fd8407a29631030b1f4a088f01e214b46eb9a4f5..53e43d295148a25bcbb0b3eced305a1889be2448 100644 (file)
@@ -163,16 +163,4 @@ static struct platform_driver cobalt_buttons_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init cobalt_buttons_init(void)
-{
-       return platform_driver_register(&cobalt_buttons_driver);
-}
-
-static void __exit cobalt_buttons_exit(void)
-{
-       platform_driver_unregister(&cobalt_buttons_driver);
-}
-
-module_init(cobalt_buttons_init);
-module_exit(cobalt_buttons_exit);
+module_platform_driver(cobalt_buttons_driver);
index 7283dd2a1ad3b89d349cd80897f204b5dccbe661..35083c6836c351ba5110f6fe3ec0cecc98b074e9 100644 (file)
@@ -267,17 +267,6 @@ static struct platform_driver dm355evm_keys_driver = {
                .name   = "dm355evm_keys",
        },
 };
-
-static int __init dm355evm_keys_init(void)
-{
-       return platform_driver_register(&dm355evm_keys_driver);
-}
-module_init(dm355evm_keys_init);
-
-static void __exit dm355evm_keys_exit(void)
-{
-       platform_driver_unregister(&dm355evm_keys_driver);
-}
-module_exit(dm355evm_keys_exit);
+module_platform_driver(dm355evm_keys_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
new file mode 100644 (file)
index 0000000..71fba8c
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
+ *
+ * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
+ * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/gp2ap002a00f.h>
+
+struct gp2a_data {
+       struct input_dev *input;
+       const struct gp2a_platform_data *pdata;
+       struct i2c_client *i2c_client;
+};
+
+enum gp2a_addr {
+       GP2A_ADDR_PROX  = 0x0,
+       GP2A_ADDR_GAIN  = 0x1,
+       GP2A_ADDR_HYS   = 0x2,
+       GP2A_ADDR_CYCLE = 0x3,
+       GP2A_ADDR_OPMOD = 0x4,
+       GP2A_ADDR_CON   = 0x6
+};
+
+enum gp2a_controls {
+       /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
+       GP2A_CTRL_SSD   = 0x01
+};
+
+static int gp2a_report(struct gp2a_data *dt)
+{
+       int vo = gpio_get_value(dt->pdata->vout_gpio);
+
+       input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
+       input_sync(dt->input);
+
+       return 0;
+}
+
+static irqreturn_t gp2a_irq(int irq, void *handle)
+{
+       struct gp2a_data *dt = handle;
+
+       gp2a_report(dt);
+
+       return IRQ_HANDLED;
+}
+
+static int gp2a_enable(struct gp2a_data *dt)
+{
+       return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+                                        GP2A_CTRL_SSD);
+}
+
+static int gp2a_disable(struct gp2a_data *dt)
+{
+       return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+                                        0x00);
+}
+
+static int gp2a_device_open(struct input_dev *dev)
+{
+       struct gp2a_data *dt = input_get_drvdata(dev);
+       int error;
+
+       error = gp2a_enable(dt);
+       if (error < 0) {
+               dev_err(&dt->i2c_client->dev,
+                       "unable to activate, err %d\n", error);
+               return error;
+       }
+
+       gp2a_report(dt);
+
+       return 0;
+}
+
+static void gp2a_device_close(struct input_dev *dev)
+{
+       struct gp2a_data *dt = input_get_drvdata(dev);
+       int error;
+
+       error = gp2a_disable(dt);
+       if (error < 0)
+               dev_err(&dt->i2c_client->dev,
+                       "unable to deactivate, err %d\n", error);
+}
+
+static int __devinit gp2a_initialize(struct gp2a_data *dt)
+{
+       int error;
+
+       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
+                                         0x08);
+       if (error < 0)
+               return error;
+
+       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
+                                         0xc2);
+       if (error < 0)
+               return error;
+
+       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
+                                         0x04);
+       if (error < 0)
+               return error;
+
+       error = gp2a_disable(dt);
+
+       return error;
+}
+
+static int __devinit gp2a_probe(struct i2c_client *client,
+                               const struct i2c_device_id *id)
+{
+       const struct gp2a_platform_data *pdata = client->dev.platform_data;
+       struct gp2a_data *dt;
+       int error;
+
+       if (!pdata)
+               return -EINVAL;
+
+       if (pdata->hw_setup) {
+               error = pdata->hw_setup(client);
+               if (error < 0)
+                       return error;
+       }
+
+       error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
+       if (error)
+               goto err_hw_shutdown;
+
+       dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
+       if (!dt) {
+               error = -ENOMEM;
+               goto err_free_gpio;
+       }
+
+       dt->pdata = pdata;
+       dt->i2c_client = client;
+
+       error = gp2a_initialize(dt);
+       if (error < 0)
+               goto err_free_mem;
+
+       dt->input = input_allocate_device();
+       if (!dt->input) {
+               error = -ENOMEM;
+               goto err_free_mem;
+       }
+
+       input_set_drvdata(dt->input, dt);
+
+       dt->input->open = gp2a_device_open;
+       dt->input->close = gp2a_device_close;
+       dt->input->name = GP2A_I2C_NAME;
+       dt->input->id.bustype = BUS_I2C;
+       dt->input->dev.parent = &client->dev;
+
+       input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
+
+       error = request_threaded_irq(client->irq, NULL, gp2a_irq,
+                       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                               IRQF_ONESHOT,
+                       GP2A_I2C_NAME, dt);
+       if (error) {
+               dev_err(&client->dev, "irq request failed\n");
+               goto err_free_input_dev;
+       }
+
+       error = input_register_device(dt->input);
+       if (error) {
+               dev_err(&client->dev, "device registration failed\n");
+               goto err_free_irq;
+       }
+
+       device_init_wakeup(&client->dev, pdata->wakeup);
+       i2c_set_clientdata(client, dt);
+
+       return 0;
+
+err_free_irq:
+       free_irq(client->irq, dt);
+err_free_input_dev:
+       input_free_device(dt->input);
+err_free_mem:
+       kfree(dt);
+err_free_gpio:
+       gpio_free(pdata->vout_gpio);
+err_hw_shutdown:
+       if (pdata->hw_shutdown)
+               pdata->hw_shutdown(client);
+       return error;
+}
+
+static int __devexit gp2a_remove(struct i2c_client *client)
+{
+       struct gp2a_data *dt = i2c_get_clientdata(client);
+       const struct gp2a_platform_data *pdata = dt->pdata;
+
+       device_init_wakeup(&client->dev, false);
+
+       free_irq(client->irq, dt);
+
+       input_unregister_device(dt->input);
+       kfree(dt);
+
+       gpio_free(pdata->vout_gpio);
+
+       if (pdata->hw_shutdown)
+               pdata->hw_shutdown(client);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gp2a_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct gp2a_data *dt = i2c_get_clientdata(client);
+       int retval = 0;
+
+       if (device_may_wakeup(&client->dev)) {
+               enable_irq_wake(client->irq);
+       } else {
+               mutex_lock(&dt->input->mutex);
+               if (dt->input->users)
+                       retval = gp2a_disable(dt);
+               mutex_unlock(&dt->input->mutex);
+       }
+
+       return retval;
+}
+
+static int gp2a_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct gp2a_data *dt = i2c_get_clientdata(client);
+       int retval = 0;
+
+       if (device_may_wakeup(&client->dev)) {
+               disable_irq_wake(client->irq);
+       } else {
+               mutex_lock(&dt->input->mutex);
+               if (dt->input->users)
+                       retval = gp2a_enable(dt);
+               mutex_unlock(&dt->input->mutex);
+       }
+
+       return retval;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
+
+static const struct i2c_device_id gp2a_i2c_id[] = {
+       { GP2A_I2C_NAME, 0 },
+       { }
+};
+
+static struct i2c_driver gp2a_i2c_driver = {
+       .driver = {
+               .name   = GP2A_I2C_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = &gp2a_pm,
+       },
+       .probe          = gp2a_probe,
+       .remove         = __devexit_p(gp2a_remove),
+       .id_table       = gp2a_i2c_id,
+};
+
+static int __init gp2a_init(void)
+{
+       return i2c_add_driver(&gp2a_i2c_driver);
+}
+
+static void __exit gp2a_exit(void)
+{
+       i2c_del_driver(&gp2a_i2c_driver);
+}
+
+module_init(gp2a_init);
+module_exit(gp2a_exit);
+
+MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
+MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
new file mode 100644 (file)
index 0000000..277a057
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ *  Driver for tilt switches connected via GPIO lines
+ *  not capable of generating interrupts
+ *
+ *  Copyright (C) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ *  based on: drivers/input/keyboard/gpio_keys_polled.c
+ *
+ *  Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/input/gpio_tilt.h>
+
+#define DRV_NAME       "gpio-tilt-polled"
+
+struct gpio_tilt_polled_dev {
+       struct input_polled_dev *poll_dev;
+       struct device *dev;
+       const struct gpio_tilt_platform_data *pdata;
+
+       int last_state;
+
+       int threshold;
+       int count;
+};
+
+static void gpio_tilt_polled_poll(struct input_polled_dev *dev)
+{
+       struct gpio_tilt_polled_dev *tdev = dev->private;
+       const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+       struct input_dev *input = dev->input;
+       struct gpio_tilt_state *tilt_state = NULL;
+       int state, i;
+
+       if (tdev->count < tdev->threshold) {
+               tdev->count++;
+       } else {
+               state = 0;
+               for (i = 0; i < pdata->nr_gpios; i++)
+                       state |= (!!gpio_get_value(pdata->gpios[i].gpio) << i);
+
+               if (state != tdev->last_state) {
+                       for (i = 0; i < pdata->nr_states; i++)
+                               if (pdata->states[i].gpios == state)
+                                       tilt_state = &pdata->states[i];
+
+                       if (tilt_state) {
+                               for (i = 0; i < pdata->nr_axes; i++)
+                                       input_report_abs(input,
+                                                        pdata->axes[i].axis,
+                                                        tilt_state->axes[i]);
+
+                               input_sync(input);
+                       }
+
+                       tdev->count = 0;
+                       tdev->last_state = state;
+               }
+       }
+}
+
+static void gpio_tilt_polled_open(struct input_polled_dev *dev)
+{
+       struct gpio_tilt_polled_dev *tdev = dev->private;
+       const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+       if (pdata->enable)
+               pdata->enable(tdev->dev);
+
+       /* report initial state of the axes */
+       tdev->last_state = -1;
+       tdev->count = tdev->threshold;
+       gpio_tilt_polled_poll(tdev->poll_dev);
+}
+
+static void gpio_tilt_polled_close(struct input_polled_dev *dev)
+{
+       struct gpio_tilt_polled_dev *tdev = dev->private;
+       const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+       if (pdata->disable)
+               pdata->disable(tdev->dev);
+}
+
+static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+{
+       const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+       struct device *dev = &pdev->dev;
+       struct gpio_tilt_polled_dev *tdev;
+       struct input_polled_dev *poll_dev;
+       struct input_dev *input;
+       int error, i;
+
+       if (!pdata || !pdata->poll_interval)
+               return -EINVAL;
+
+       tdev = kzalloc(sizeof(struct gpio_tilt_polled_dev), GFP_KERNEL);
+       if (!tdev) {
+               dev_err(dev, "no memory for private data\n");
+               return -ENOMEM;
+       }
+
+       error = gpio_request_array(pdata->gpios, pdata->nr_gpios);
+       if (error) {
+               dev_err(dev,
+                       "Could not request tilt GPIOs: %d\n", error);
+               goto err_free_tdev;
+       }
+
+       poll_dev = input_allocate_polled_device();
+       if (!poll_dev) {
+               dev_err(dev, "no memory for polled device\n");
+               error = -ENOMEM;
+               goto err_free_gpios;
+       }
+
+       poll_dev->private = tdev;
+       poll_dev->poll = gpio_tilt_polled_poll;
+       poll_dev->poll_interval = pdata->poll_interval;
+       poll_dev->open = gpio_tilt_polled_open;
+       poll_dev->close = gpio_tilt_polled_close;
+
+       input = poll_dev->input;
+
+       input->name = pdev->name;
+       input->phys = DRV_NAME"/input0";
+       input->dev.parent = &pdev->dev;
+
+       input->id.bustype = BUS_HOST;
+       input->id.vendor = 0x0001;
+       input->id.product = 0x0001;
+       input->id.version = 0x0100;
+
+       __set_bit(EV_ABS, input->evbit);
+       for (i = 0; i < pdata->nr_axes; i++)
+               input_set_abs_params(input, pdata->axes[i].axis,
+                                    pdata->axes[i].min, pdata->axes[i].max,
+                                    pdata->axes[i].fuzz, pdata->axes[i].flat);
+
+       tdev->threshold = DIV_ROUND_UP(pdata->debounce_interval,
+                                      pdata->poll_interval);
+
+       tdev->poll_dev = poll_dev;
+       tdev->dev = dev;
+       tdev->pdata = pdata;
+
+       error = input_register_polled_device(poll_dev);
+       if (error) {
+               dev_err(dev, "unable to register polled device, err=%d\n",
+                       error);
+               goto err_free_polldev;
+       }
+
+       platform_set_drvdata(pdev, tdev);
+
+       return 0;
+
+err_free_polldev:
+       input_free_polled_device(poll_dev);
+err_free_gpios:
+       gpio_free_array(pdata->gpios, pdata->nr_gpios);
+err_free_tdev:
+       kfree(tdev);
+
+       return error;
+}
+
+static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+{
+       struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
+       const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+       platform_set_drvdata(pdev, NULL);
+
+       input_unregister_polled_device(tdev->poll_dev);
+       input_free_polled_device(tdev->poll_dev);
+
+       gpio_free_array(pdata->gpios, pdata->nr_gpios);
+
+       kfree(tdev);
+
+       return 0;
+}
+
+static struct platform_driver gpio_tilt_polled_driver = {
+       .probe  = gpio_tilt_polled_probe,
+       .remove = __devexit_p(gpio_tilt_polled_remove),
+       .driver = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+       },
+};
+
+module_platform_driver(gpio_tilt_polled_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Polled GPIO tilt driver");
+MODULE_ALIAS("platform:" DRV_NAME);
index 302ab46ce75201bbf0025a6730155fc414ce4fa6..50e283068301db05ad34bff8b839c4773bb3b747 100644 (file)
@@ -168,16 +168,5 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
        .remove         = __devexit_p(ixp4xx_spkr_remove),
        .shutdown       = ixp4xx_spkr_shutdown,
 };
+module_platform_driver(ixp4xx_spkr_platform_driver);
 
-static int __init ixp4xx_spkr_init(void)
-{
-       return platform_driver_register(&ixp4xx_spkr_platform_driver);
-}
-
-static void __exit ixp4xx_spkr_exit(void)
-{
-       platform_driver_unregister(&ixp4xx_spkr_platform_driver);
-}
-
-module_init(ixp4xx_spkr_init);
-module_exit(ixp4xx_spkr_exit);
index 7de0ded4ccc382ed3b90b16c302176be980eb59b..23cf08271049d9225a43147c7a3a500817321760 100644 (file)
@@ -166,18 +166,7 @@ static struct platform_driver max8925_onkey_driver = {
        .probe          = max8925_onkey_probe,
        .remove         = __devexit_p(max8925_onkey_remove),
 };
-
-static int __init max8925_onkey_init(void)
-{
-       return platform_driver_register(&max8925_onkey_driver);
-}
-module_init(max8925_onkey_init);
-
-static void __exit max8925_onkey_exit(void)
-{
-       platform_driver_unregister(&max8925_onkey_driver);
-}
-module_exit(max8925_onkey_exit);
+module_platform_driver(max8925_onkey_driver);
 
 MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 09b0522886579ce5e21ad55e8e12e62e02fc627b..8428f1e8e83ed4ef7c3d98d7e5ad7db7e920a7a5 100644 (file)
@@ -255,7 +255,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver mc13783_pwrbutton_driver = {
+static struct platform_driver mc13783_pwrbutton_driver = {
        .probe          = mc13783_pwrbutton_probe,
        .remove         = __devexit_p(mc13783_pwrbutton_remove),
        .driver         = {
@@ -264,17 +264,7 @@ struct platform_driver mc13783_pwrbutton_driver = {
        },
 };
 
-static int __init mc13783_pwrbutton_init(void)
-{
-       return platform_driver_register(&mc13783_pwrbutton_driver);
-}
-module_init(mc13783_pwrbutton_init);
-
-static void __exit mc13783_pwrbutton_exit(void)
-{
-       platform_driver_unregister(&mc13783_pwrbutton_driver);
-}
-module_exit(mc13783_pwrbutton_exit);
+module_platform_driver(mc13783_pwrbutton_driver);
 
 MODULE_ALIAS("platform:mc13783-pwrbutton");
 MODULE_DESCRIPTION("MC13783 Power Button");
index f71dc728da58adcad257da28f053b98fc29fa3fa..208d1a1cc7f34895904ad313d3cd2654af4c2995 100644 (file)
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 
-#define MPU3050_CHIP_ID_REG    0x00
 #define MPU3050_CHIP_ID                0x69
-#define MPU3050_XOUT_H         0x1D
-#define MPU3050_PWR_MGM                0x3E
-#define MPU3050_PWR_MGM_POS    6
-#define MPU3050_PWR_MGM_MASK   0x40
 
 #define MPU3050_AUTO_DELAY     1000
 
 #define MPU3050_MIN_VALUE      -32768
 #define MPU3050_MAX_VALUE      32767
 
+#define MPU3050_DEFAULT_POLL_INTERVAL  200
+#define MPU3050_DEFAULT_FS_RANGE       3
+
+/* Register map */
+#define MPU3050_CHIP_ID_REG    0x00
+#define MPU3050_SMPLRT_DIV     0x15
+#define MPU3050_DLPF_FS_SYNC   0x16
+#define MPU3050_INT_CFG                0x17
+#define MPU3050_XOUT_H         0x1D
+#define MPU3050_PWR_MGM                0x3E
+#define MPU3050_PWR_MGM_POS    6
+
+/* Register bits */
+
+/* DLPF_FS_SYNC */
+#define MPU3050_EXT_SYNC_NONE          0x00
+#define MPU3050_EXT_SYNC_TEMP          0x20
+#define MPU3050_EXT_SYNC_GYROX         0x40
+#define MPU3050_EXT_SYNC_GYROY         0x60
+#define MPU3050_EXT_SYNC_GYROZ         0x80
+#define MPU3050_EXT_SYNC_ACCELX        0xA0
+#define MPU3050_EXT_SYNC_ACCELY        0xC0
+#define MPU3050_EXT_SYNC_ACCELZ        0xE0
+#define MPU3050_EXT_SYNC_MASK          0xE0
+#define MPU3050_FS_250DPS              0x00
+#define MPU3050_FS_500DPS              0x08
+#define MPU3050_FS_1000DPS             0x10
+#define MPU3050_FS_2000DPS             0x18
+#define MPU3050_FS_MASK                0x18
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2  0x00
+#define MPU3050_DLPF_CFG_188HZ         0x01
+#define MPU3050_DLPF_CFG_98HZ          0x02
+#define MPU3050_DLPF_CFG_42HZ          0x03
+#define MPU3050_DLPF_CFG_20HZ          0x04
+#define MPU3050_DLPF_CFG_10HZ          0x05
+#define MPU3050_DLPF_CFG_5HZ           0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF  0x07
+#define MPU3050_DLPF_CFG_MASK          0x07
+/* INT_CFG */
+#define MPU3050_RAW_RDY_EN             0x01
+#define MPU3050_MPU_RDY_EN             0x02
+#define MPU3050_LATCH_INT_EN           0x04
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X          0x01
+#define MPU3050_PWR_MGM_PLL_Y          0x02
+#define MPU3050_PWR_MGM_PLL_Z          0x03
+#define MPU3050_PWR_MGM_CLKSEL         0x07
+#define MPU3050_PWR_MGM_STBY_ZG        0x08
+#define MPU3050_PWR_MGM_STBY_YG        0x10
+#define MPU3050_PWR_MGM_STBY_XG        0x20
+#define MPU3050_PWR_MGM_SLEEP          0x40
+#define MPU3050_PWR_MGM_RESET          0x80
+#define MPU3050_PWR_MGM_MASK           0x40
+
 struct axis_data {
        s16 x;
        s16 y;
@@ -148,9 +197,20 @@ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
 static int mpu3050_input_open(struct input_dev *input)
 {
        struct mpu3050_sensor *sensor = input_get_drvdata(input);
+       int error;
 
        pm_runtime_get(sensor->dev);
 
+       /* Enable interrupts */
+       error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
+                                         MPU3050_LATCH_INT_EN |
+                                         MPU3050_RAW_RDY_EN |
+                                         MPU3050_MPU_RDY_EN);
+       if (error < 0) {
+               pm_runtime_put(sensor->dev);
+               return error;
+       }
+
        return 0;
 }
 
@@ -191,6 +251,51 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+/**
+ *     mpu3050_hw_init -       initialize hardware
+ *     @sensor: the sensor
+ *
+ *     Called during device probe; configures the sampling method.
+ */
+static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+{
+       struct i2c_client *client = sensor->client;
+       int ret;
+       u8 reg;
+
+       /* Reset */
+       ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM,
+                                       MPU3050_PWR_MGM_RESET);
+       if (ret < 0)
+               return ret;
+
+       ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
+       if (ret < 0)
+               return ret;
+
+       ret &= ~MPU3050_PWR_MGM_CLKSEL;
+       ret |= MPU3050_PWR_MGM_PLL_Z;
+       ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret);
+       if (ret < 0)
+               return ret;
+
+       /* Output frequency divider. The poll interval */
+       ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV,
+                                       MPU3050_DEFAULT_POLL_INTERVAL - 1);
+       if (ret < 0)
+               return ret;
+
+       /* Set low pass filter and full scale */
+       reg = MPU3050_DEFAULT_FS_RANGE;
+       reg |= MPU3050_DLPF_CFG_42HZ << 3;
+       reg |= MPU3050_EXT_SYNC_NONE << 5;
+       ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 /**
  *     mpu3050_probe   -       device detection callback
  *     @client: i2c client of found device
@@ -256,10 +361,14 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
 
        pm_runtime_set_active(&client->dev);
 
+       error = mpu3050_hw_init(sensor);
+       if (error)
+               goto err_pm_set_suspended;
+
        error = request_threaded_irq(client->irq,
                                     NULL, mpu3050_interrupt_thread,
                                     IRQF_TRIGGER_RISING,
-                                    "mpu_int", sensor);
+                                    "mpu3050", sensor);
        if (error) {
                dev_err(&client->dev,
                        "can't get IRQ %d, error %d\n", client->irq, error);
@@ -348,11 +457,18 @@ static const struct i2c_device_id mpu3050_ids[] = {
 };
 MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
 
+static const struct of_device_id mpu3050_of_match[] = {
+       { .compatible = "invn,mpu3050", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_of_match);
+
 static struct i2c_driver mpu3050_i2c_driver = {
        .driver = {
                .name   = "mpu3050",
                .owner  = THIS_MODULE,
                .pm     = &mpu3050_pm,
+               .of_match_table = mpu3050_of_match,
        },
        .probe          = mpu3050_probe,
        .remove         = __devexit_p(mpu3050_remove),
index 99335c286250594933967ea3132aba3a948b4b4f..e09b4fe81913ad89f9de7b8b59ed1c36a013962c 100644 (file)
@@ -125,19 +125,7 @@ static struct platform_driver pcap_keys_device_driver = {
                .owner  = THIS_MODULE,
        }
 };
-
-static int __init pcap_keys_init(void)
-{
-       return platform_driver_register(&pcap_keys_device_driver);
-};
-
-static void __exit pcap_keys_exit(void)
-{
-       platform_driver_unregister(&pcap_keys_device_driver);
-};
-
-module_init(pcap_keys_init);
-module_exit(pcap_keys_exit);
+module_platform_driver(pcap_keys_device_driver);
 
 MODULE_DESCRIPTION("Motorola PCAP2 input events driver");
 MODULE_AUTHOR("Ilya Petrov <ilya.muromec@gmail.com>");
index 95562735728d6d5c2f472767284b778546ab529c..53891de80b0e6ded0473b61e59c52c1640746a0d 100644 (file)
@@ -113,18 +113,7 @@ static struct platform_driver pcf50633_input_driver = {
        .probe = pcf50633_input_probe,
        .remove = __devexit_p(pcf50633_input_remove),
 };
-
-static int __init pcf50633_input_init(void)
-{
-       return platform_driver_register(&pcf50633_input_driver);
-}
-module_init(pcf50633_input_init);
-
-static void __exit pcf50633_input_exit(void)
-{
-       platform_driver_unregister(&pcf50633_input_driver);
-}
-module_exit(pcf50633_input_exit);
+module_platform_driver(pcf50633_input_driver);
 
 MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
 MODULE_DESCRIPTION("PCF50633 input driver");
index 34f4d2e0f50f45cc3cd526e67360b1dafd527ffc..b2484aa07f32fe68565a14501f0267dfb59c5040 100644 (file)
@@ -134,17 +134,5 @@ static struct platform_driver pcspkr_platform_driver = {
        .remove         = __devexit_p(pcspkr_remove),
        .shutdown       = pcspkr_shutdown,
 };
+module_platform_driver(pcspkr_platform_driver);
 
-
-static int __init pcspkr_init(void)
-{
-       return platform_driver_register(&pcspkr_platform_driver);
-}
-
-static void __exit pcspkr_exit(void)
-{
-       platform_driver_unregister(&pcspkr_platform_driver);
-}
-
-module_init(pcspkr_init);
-module_exit(pcspkr_exit);
index 43192930824bce380f0ffeb563ff8b026d23d11d..dfbfb463ea5d22844ea5c247bbbc0cf6644b44ea 100644 (file)
@@ -277,18 +277,7 @@ static struct platform_driver pm8xxx_vib_driver = {
                .pm     = &pm8xxx_vib_pm_ops,
        },
 };
-
-static int __init pm8xxx_vib_init(void)
-{
-       return platform_driver_register(&pm8xxx_vib_driver);
-}
-module_init(pm8xxx_vib_init);
-
-static void __exit pm8xxx_vib_exit(void)
-{
-       platform_driver_unregister(&pm8xxx_vib_driver);
-}
-module_exit(pm8xxx_vib_exit);
+module_platform_driver(pm8xxx_vib_driver);
 
 MODULE_ALIAS("platform:pm8xxx_vib");
 MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
index b3cfb9c71e664244f85bb77427e13ca01a932d20..0f83d0f1d015c3f6ed9f4b6aaeaaab85cb63d29a 100644 (file)
@@ -213,18 +213,7 @@ static struct platform_driver pmic8xxx_pwrkey_driver = {
                .pm     = &pm8xxx_pwr_key_pm_ops,
        },
 };
-
-static int __init pmic8xxx_pwrkey_init(void)
-{
-       return platform_driver_register(&pmic8xxx_pwrkey_driver);
-}
-module_init(pmic8xxx_pwrkey_init);
-
-static void __exit pmic8xxx_pwrkey_exit(void)
-{
-       platform_driver_unregister(&pmic8xxx_pwrkey_driver);
-}
-module_exit(pmic8xxx_pwrkey_exit);
+module_platform_driver(pmic8xxx_pwrkey_driver);
 
 MODULE_ALIAS("platform:pmic8xxx_pwrkey");
 MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
index 57c294f071984ddfe6e14c12dbb4dfb037e7fd2f..fc84c8a51147c098ed70bd0dd949dd375754272e 100644 (file)
@@ -180,18 +180,7 @@ static struct platform_driver pwm_beeper_driver = {
                .pm     = PWM_BEEPER_PM_OPS,
        },
 };
-
-static int __init pwm_beeper_init(void)
-{
-       return platform_driver_register(&pwm_beeper_driver);
-}
-module_init(pwm_beeper_init);
-
-static void __exit pwm_beeper_exit(void)
-{
-       platform_driver_unregister(&pwm_beeper_driver);
-}
-module_exit(pwm_beeper_exit);
+module_platform_driver(pwm_beeper_driver);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("PWM beeper driver");
index e2c7f622a0b5b98521f507ae5c64863b58b11cec..aeb02bcf7233a3d1d4383bfd14de04a6d9630d75 100644 (file)
@@ -100,19 +100,7 @@ static struct platform_driver rb532_button_driver = {
                .owner = THIS_MODULE,
        },
 };
-
-static int __init rb532_button_init(void)
-{
-       return platform_driver_register(&rb532_button_driver);
-}
-
-static void __exit rb532_button_exit(void)
-{
-       platform_driver_unregister(&rb532_button_driver);
-}
-
-module_init(rb532_button_init);
-module_exit(rb532_button_exit);
+module_platform_driver(rb532_button_driver);
 
 MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
 MODULE_LICENSE("GPL");
index 2be21694fac1bf1f66a6189cbd6ea96844a75c28..f07f784198b9e81139f8ed5a192f17578cdf6c75 100644 (file)
@@ -284,19 +284,7 @@ static struct platform_driver rotary_encoder_driver = {
                .owner  = THIS_MODULE,
        }
 };
-
-static int __init rotary_encoder_init(void)
-{
-       return platform_driver_register(&rotary_encoder_driver);
-}
-
-static void __exit rotary_encoder_exit(void)
-{
-       platform_driver_unregister(&rotary_encoder_driver);
-}
-
-module_init(rotary_encoder_init);
-module_exit(rotary_encoder_exit);
+module_platform_driver(rotary_encoder_driver);
 
 MODULE_ALIAS("platform:" DRV_NAME);
 MODULE_DESCRIPTION("GPIO rotary encoder driver");
index 1a80c0dab83bce0204a6532413a759d830451fdc..5d9fd55711991bfb1a701eb0f8ba26dd03b4687d 100644 (file)
@@ -164,17 +164,6 @@ static struct platform_driver sgi_buttons_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init sgi_buttons_init(void)
-{
-       return platform_driver_register(&sgi_buttons_driver);
-}
-
-static void __exit sgi_buttons_exit(void)
-{
-       platform_driver_unregister(&sgi_buttons_driver);
-}
+module_platform_driver(sgi_buttons_driver);
 
 MODULE_LICENSE("GPL");
-module_init(sgi_buttons_init);
-module_exit(sgi_buttons_exit);
index 38e4b507b94cc629ef330cf173b1b41d3765e904..19a68828cd865d0812ae57e2305dea1432bee912 100644 (file)
@@ -107,25 +107,14 @@ static int __exit twl4030_pwrbutton_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver twl4030_pwrbutton_driver = {
+       .probe          = twl4030_pwrbutton_probe,
        .remove         = __exit_p(twl4030_pwrbutton_remove),
        .driver         = {
                .name   = "twl4030_pwrbutton",
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init twl4030_pwrbutton_init(void)
-{
-       return platform_driver_probe(&twl4030_pwrbutton_driver,
-                       twl4030_pwrbutton_probe);
-}
-module_init(twl4030_pwrbutton_init);
-
-static void __exit twl4030_pwrbutton_exit(void)
-{
-       platform_driver_unregister(&twl4030_pwrbutton_driver);
-}
-module_exit(twl4030_pwrbutton_exit);
+module_platform_driver(twl4030_pwrbutton_driver);
 
 MODULE_ALIAS("platform:twl4030_pwrbutton");
 MODULE_DESCRIPTION("Triton2 Power Button");
index 3c1a432c14dc3a8f5eadd4540f247d669ba773dd..37651373a95b0ebb19d25ca32cfcd62599ee4a9b 100644 (file)
@@ -278,21 +278,9 @@ static struct platform_driver twl4030_vibra_driver = {
 #endif
        },
 };
-
-static int __init twl4030_vibra_init(void)
-{
-       return platform_driver_register(&twl4030_vibra_driver);
-}
-module_init(twl4030_vibra_init);
-
-static void __exit twl4030_vibra_exit(void)
-{
-       platform_driver_unregister(&twl4030_vibra_driver);
-}
-module_exit(twl4030_vibra_exit);
+module_platform_driver(twl4030_vibra_driver);
 
 MODULE_ALIAS("platform:twl4030-vibra");
-
 MODULE_DESCRIPTION("TWL4030 Vibra driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Nokia Corporation");
index ad153a417eed98a7fff70945f684becc76581b96..45874fed523ab771e94f8b3d9dccf415b8cfa6af 100644 (file)
@@ -410,18 +410,7 @@ static struct platform_driver twl6040_vibra_driver = {
                .pm     = &twl6040_vibra_pm_ops,
        },
 };
-
-static int __init twl6040_vibra_init(void)
-{
-       return platform_driver_register(&twl6040_vibra_driver);
-}
-module_init(twl6040_vibra_init);
-
-static void __exit twl6040_vibra_exit(void)
-{
-       platform_driver_unregister(&twl6040_vibra_driver);
-}
-module_exit(twl6040_vibra_exit);
+module_platform_driver(twl6040_vibra_driver);
 
 MODULE_ALIAS("platform:twl6040-vibra");
 MODULE_DESCRIPTION("TWL6040 Vibra driver");
index c3d7ba5f5b47037c3b8c46b62ca7d6ad931e62db..47f18d6bce46826160e653821d7be213223c4bb4 100644 (file)
@@ -145,18 +145,7 @@ static struct platform_driver wm831x_on_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init wm831x_on_init(void)
-{
-       return platform_driver_register(&wm831x_on_driver);
-}
-module_init(wm831x_on_init);
-
-static void __exit wm831x_on_exit(void)
-{
-       platform_driver_unregister(&wm831x_on_driver);
-}
-module_exit(wm831x_on_exit);
+module_platform_driver(wm831x_on_driver);
 
 MODULE_ALIAS("platform:wm831x-on");
 MODULE_DESCRIPTION("WM831x ON pin");
index ad2e51c04db89c06faa30b0e07ef70db12221fc1..02ca8680ea5b8f68393cecc3925792fc74236614 100644 (file)
@@ -361,15 +361,12 @@ static const struct xenbus_device_id xenkbd_ids[] = {
        { "" }
 };
 
-static struct xenbus_driver xenkbd_driver = {
-       .name = "vkbd",
-       .owner = THIS_MODULE,
-       .ids = xenkbd_ids,
+static DEFINE_XENBUS_DRIVER(xenkbd, ,
        .probe = xenkbd_probe,
        .remove = xenkbd_remove,
        .resume = xenkbd_resume,
        .otherend_changed = xenkbd_backend_changed,
-};
+);
 
 static int __init xenkbd_init(void)
 {
index 003587c71f43edd8d6ad315d5ffaf514ad3705d7..bd87380bd879edcfd9a278d53c887c444a14b9d6 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
 
 #include "psmouse.h"
 #include "alps.h"
 
-#define ALPS_OLDPROTO          0x01    /* old style input */
+/*
+ * Definitions for ALPS version 3 and 4 command mode protocol
+ */
+#define ALPS_V3_X_MAX  2000
+#define ALPS_V3_Y_MAX  1400
+
+#define ALPS_BITMAP_X_BITS     15
+#define ALPS_BITMAP_Y_BITS     11
+
+#define ALPS_CMD_NIBBLE_10     0x01f2
+
+static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
+       { PSMOUSE_CMD_SETPOLL,          0x00 }, /* 0 */
+       { PSMOUSE_CMD_RESET_DIS,        0x00 }, /* 1 */
+       { PSMOUSE_CMD_SETSCALE21,       0x00 }, /* 2 */
+       { PSMOUSE_CMD_SETRATE,          0x0a }, /* 3 */
+       { PSMOUSE_CMD_SETRATE,          0x14 }, /* 4 */
+       { PSMOUSE_CMD_SETRATE,          0x28 }, /* 5 */
+       { PSMOUSE_CMD_SETRATE,          0x3c }, /* 6 */
+       { PSMOUSE_CMD_SETRATE,          0x50 }, /* 7 */
+       { PSMOUSE_CMD_SETRATE,          0x64 }, /* 8 */
+       { PSMOUSE_CMD_SETRATE,          0xc8 }, /* 9 */
+       { ALPS_CMD_NIBBLE_10,           0x00 }, /* a */
+       { PSMOUSE_CMD_SETRES,           0x00 }, /* b */
+       { PSMOUSE_CMD_SETRES,           0x01 }, /* c */
+       { PSMOUSE_CMD_SETRES,           0x02 }, /* d */
+       { PSMOUSE_CMD_SETRES,           0x03 }, /* e */
+       { PSMOUSE_CMD_SETSCALE11,       0x00 }, /* f */
+};
+
+static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
+       { PSMOUSE_CMD_ENABLE,           0x00 }, /* 0 */
+       { PSMOUSE_CMD_RESET_DIS,        0x00 }, /* 1 */
+       { PSMOUSE_CMD_SETSCALE21,       0x00 }, /* 2 */
+       { PSMOUSE_CMD_SETRATE,          0x0a }, /* 3 */
+       { PSMOUSE_CMD_SETRATE,          0x14 }, /* 4 */
+       { PSMOUSE_CMD_SETRATE,          0x28 }, /* 5 */
+       { PSMOUSE_CMD_SETRATE,          0x3c }, /* 6 */
+       { PSMOUSE_CMD_SETRATE,          0x50 }, /* 7 */
+       { PSMOUSE_CMD_SETRATE,          0x64 }, /* 8 */
+       { PSMOUSE_CMD_SETRATE,          0xc8 }, /* 9 */
+       { ALPS_CMD_NIBBLE_10,           0x00 }, /* a */
+       { PSMOUSE_CMD_SETRES,           0x00 }, /* b */
+       { PSMOUSE_CMD_SETRES,           0x01 }, /* c */
+       { PSMOUSE_CMD_SETRES,           0x02 }, /* d */
+       { PSMOUSE_CMD_SETRES,           0x03 }, /* e */
+       { PSMOUSE_CMD_SETSCALE11,       0x00 }, /* f */
+};
+
+
 #define ALPS_DUALPOINT         0x02    /* touchpad has trackstick */
 #define ALPS_PASS              0x04    /* device has a pass-through port */
 
                                           6-byte ALPS packet */
 
 static const struct alps_model_info alps_model_data[] = {
-       { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
-       { { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO },              /* UMAX-530T */
-       { { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
-       { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
-       { { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 },                          /* HP ze1115 */
-       { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 },               /* Fujitsu Siemens S6010 */
-       { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL },                 /* Toshiba Satellite S2400-103 */
-       { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 },               /* NEC Versa L320 */
-       { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
-       { { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT },             /* ThinkPad R61 8918-5QG */
-       { { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
-       { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 },               /* Ahtec Laptop */
-       { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
-       { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
-       { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
+       { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Toshiba Salellite Pro M10 */
+       { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 },                           /* UMAX-530T */
+       { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },                           /* HP ze1115 */
+       { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Fujitsu Siemens S6010 */
+       { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL },                  /* Toshiba Satellite S2400-103 */
+       { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 },                /* NEC Versa L320 */
+       { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D800 */
+       { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT },              /* ThinkPad R61 8918-5QG */
+       { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+       { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Ahtec Laptop */
+       { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* XXX */
+       { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
+       { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D600 */
        /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
-       { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+       { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-       { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },          /* Dell Vostro 1400 */
-       { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },      /* Toshiba Tecra A11-11L */
+       { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
+       { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
+       { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+       { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+       { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
 };
 
 /*
@@ -67,42 +120,7 @@ static const struct alps_model_info alps_model_data[] = {
  * isn't valid per PS/2 spec.
  */
 
-/*
- * PS/2 packet format
- *
- * byte 0:  0    0 YSGN XSGN    1    M    R    L
- * byte 1: X7   X6   X5   X4   X3   X2   X1   X0
- * byte 2: Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
- *
- * Note that the device never signals overflow condition.
- *
- * ALPS absolute Mode - new format
- *
- * byte 0:  1    ?    ?    ?    1    ?    ?    ?
- * byte 1:  0   x6   x5   x4   x3   x2   x1   x0
- * byte 2:  0  x10   x9   x8   x7    ?  fin  ges
- * byte 3:  0   y9   y8   y7    1    M    R    L
- * byte 4:  0   y6   y5   y4   y3   y2   y1   y0
- * byte 5:  0   z6   z5   z4   z3   z2   z1   z0
- *
- * Dualpoint device -- interleaved packet format
- *
- * byte 0:    1    1    0    0    1    1    1    1
- * byte 1:    0   x6   x5   x4   x3   x2   x1   x0
- * byte 2:    0  x10   x9   x8   x7    0  fin  ges
- * byte 3:    0    0 YSGN XSGN    1    1    1    1
- * byte 4:   X7   X6   X5   X4   X3   X2   X1   X0
- * byte 5:   Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
- * byte 6:    0   y9   y8   y7    1    m    r    l
- * byte 7:    0   y6   y5   y4   y3   y2   y1   y0
- * byte 8:    0   z6   z5   z4   z3   z2   z1   z0
- *
- * CAPITALS = stick, miniscules = touchpad
- *
- * ?'s can have different meanings on different models,
- * such as wheel rotation, extra buttons, stick buttons
- * on a dualpoint, etc.
- */
+/* Packet formats are described in Documentation/input/alps.txt */
 
 static bool alps_is_valid_first_byte(const struct alps_model_info *model,
                                     unsigned char data)
@@ -137,7 +155,7 @@ static void alps_report_buttons(struct psmouse *psmouse,
        input_sync(dev2);
 }
 
-static void alps_process_packet(struct psmouse *psmouse)
+static void alps_process_packet_v1_v2(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
        const struct alps_model_info *model = priv->i;
@@ -147,7 +165,7 @@ static void alps_process_packet(struct psmouse *psmouse)
        int x, y, z, ges, fin, left, right, middle;
        int back = 0, forward = 0;
 
-       if (model->flags & ALPS_OLDPROTO) {
+       if (model->proto_version == ALPS_PROTO_V1) {
                left = packet[2] & 0x10;
                right = packet[2] & 0x08;
                middle = 0;
@@ -239,6 +257,403 @@ static void alps_process_packet(struct psmouse *psmouse)
        input_sync(dev);
 }
 
+/*
+ * Process bitmap data from v3 and v4 protocols. Returns the number of
+ * fingers detected. A return value of 0 means at least one of the
+ * bitmaps was empty.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of all contacts.
+ * These points are returned in x1, y1, x2, and y2 when the return value
+ * is greater than 0.
+ */
+static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+                              int *x1, int *y1, int *x2, int *y2)
+{
+       struct alps_bitmap_point {
+               int start_bit;
+               int num_bits;
+       };
+
+       int fingers_x = 0, fingers_y = 0, fingers;
+       int i, bit, prev_bit;
+       struct alps_bitmap_point x_low = {0,}, x_high = {0,};
+       struct alps_bitmap_point y_low = {0,}, y_high = {0,};
+       struct alps_bitmap_point *point;
+
+       if (!x_map || !y_map)
+               return 0;
+
+       *x1 = *y1 = *x2 = *y2 = 0;
+
+       prev_bit = 0;
+       point = &x_low;
+       for (i = 0; x_map != 0; i++, x_map >>= 1) {
+               bit = x_map & 1;
+               if (bit) {
+                       if (!prev_bit) {
+                               point->start_bit = i;
+                               fingers_x++;
+                       }
+                       point->num_bits++;
+               } else {
+                       if (prev_bit)
+                               point = &x_high;
+                       else
+                               point->num_bits = 0;
+               }
+               prev_bit = bit;
+       }
+
+       /*
+        * y bitmap is reversed for what we need (lower positions are in
+        * higher bits), so we process from the top end.
+        */
+       y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+       prev_bit = 0;
+       point = &y_low;
+       for (i = 0; y_map != 0; i++, y_map <<= 1) {
+               bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
+               if (bit) {
+                       if (!prev_bit) {
+                               point->start_bit = i;
+                               fingers_y++;
+                       }
+                       point->num_bits++;
+               } else {
+                       if (prev_bit)
+                               point = &y_high;
+                       else
+                               point->num_bits = 0;
+               }
+               prev_bit = bit;
+       }
+
+       /*
+        * Fingers can overlap, so we use the maximum count of fingers
+        * on either axis as the finger count.
+        */
+       fingers = max(fingers_x, fingers_y);
+
+       /*
+        * If total fingers is > 1 but either axis reports only a single
+        * contact, we have overlapping or adjacent fingers. For the
+        * purposes of creating a bounding box, divide the single contact
+        * (roughly) equally between the two points.
+        */
+       if (fingers > 1) {
+               if (fingers_x == 1) {
+                       i = x_low.num_bits / 2;
+                       x_low.num_bits = x_low.num_bits - i;
+                       x_high.start_bit = x_low.start_bit + i;
+                       x_high.num_bits = max(i, 1);
+               } else if (fingers_y == 1) {
+                       i = y_low.num_bits / 2;
+                       y_low.num_bits = y_low.num_bits - i;
+                       y_high.start_bit = y_low.start_bit + i;
+                       y_high.num_bits = max(i, 1);
+               }
+       }
+
+       *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+             (2 * (ALPS_BITMAP_X_BITS - 1));
+       *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+             (2 * (ALPS_BITMAP_Y_BITS - 1));
+
+       if (fingers > 1) {
+               *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
+                     (2 * (ALPS_BITMAP_X_BITS - 1));
+               *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
+                     (2 * (ALPS_BITMAP_Y_BITS - 1));
+       }
+
+       return fingers;
+}
+
+static void alps_set_slot(struct input_dev *dev, int slot, bool active,
+                         int x, int y)
+{
+       input_mt_slot(dev, slot);
+       input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+       if (active) {
+               input_report_abs(dev, ABS_MT_POSITION_X, x);
+               input_report_abs(dev, ABS_MT_POSITION_Y, y);
+       }
+}
+
+static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
+                                    int x1, int y1, int x2, int y2)
+{
+       alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
+       alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
+}
+
+static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       unsigned char *packet = psmouse->packet;
+       struct input_dev *dev = priv->dev2;
+       int x, y, z, left, right, middle;
+
+       /* Sanity check packet */
+       if (!(packet[0] & 0x40)) {
+               psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
+               return;
+       }
+
+       /*
+        * There's a special packet that seems to indicate the end
+        * of a stream of trackstick data. Filter these out.
+        */
+       if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
+               return;
+
+       x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
+       y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
+       z = (packet[4] & 0x7c) >> 2;
+
+       /*
+        * The x and y values tend to be quite large, and when used
+        * alone the trackstick is difficult to use. Scale them down
+        * to compensate.
+        */
+       x /= 8;
+       y /= 8;
+
+       input_report_rel(dev, REL_X, x);
+       input_report_rel(dev, REL_Y, -y);
+
+       /*
+        * Most ALPS models report the trackstick buttons in the touchpad
+        * packets, but a few report them here. No reliable way has been
+        * found to differentiate between the models upfront, so we enable
+        * the quirk in response to seeing a button press in the trackstick
+        * packet.
+        */
+       left = packet[3] & 0x01;
+       right = packet[3] & 0x02;
+       middle = packet[3] & 0x04;
+
+       if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
+           (left || right || middle))
+               priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;
+
+       if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
+               input_report_key(dev, BTN_LEFT, left);
+               input_report_key(dev, BTN_RIGHT, right);
+               input_report_key(dev, BTN_MIDDLE, middle);
+       }
+
+       input_sync(dev);
+       return;
+}
+
+static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       unsigned char *packet = psmouse->packet;
+       struct input_dev *dev = psmouse->dev;
+       struct input_dev *dev2 = priv->dev2;
+       int x, y, z;
+       int left, right, middle;
+       int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+       int fingers = 0, bmap_fingers;
+       unsigned int x_bitmap, y_bitmap;
+
+       /*
+        * There's no single feature of touchpad position and bitmap packets
+        * that can be used to distinguish between them. We rely on the fact
+        * that a bitmap packet should always follow a position packet with
+        * bit 6 of packet[4] set.
+        */
+       if (priv->multi_packet) {
+               /*
+                * Sometimes a position packet will indicate a multi-packet
+                * sequence, but then what follows is another position
+                * packet. Check for this, and when it happens process the
+                * position packet as usual.
+                */
+               if (packet[0] & 0x40) {
+                       fingers = (packet[5] & 0x3) + 1;
+                       x_bitmap = ((packet[4] & 0x7e) << 8) |
+                                  ((packet[1] & 0x7f) << 2) |
+                                  ((packet[0] & 0x30) >> 4);
+                       y_bitmap = ((packet[3] & 0x70) << 4) |
+                                  ((packet[2] & 0x7f) << 1) |
+                                  (packet[4] & 0x01);
+
+                       bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+                                                          &x1, &y1, &x2, &y2);
+
+                       /*
+                        * We shouldn't report more than one finger if
+                        * we don't have two coordinates.
+                        */
+                       if (fingers > 1 && bmap_fingers < 2)
+                               fingers = bmap_fingers;
+
+                       /* Now process position packet */
+                       packet = priv->multi_data;
+               } else {
+                       priv->multi_packet = 0;
+               }
+       }
+
+       /*
+        * Bit 6 of byte 0 is not usually set in position packets. The only
+        * times it seems to be set is in situations where the data is
+        * suspect anyway, e.g. a palm resting flat on the touchpad. Given
+        * this combined with the fact that this bit is useful for filtering
+        * out misidentified bitmap packets, we reject anything with this
+        * bit set.
+        */
+       if (packet[0] & 0x40)
+               return;
+
+       if (!priv->multi_packet && (packet[4] & 0x40)) {
+               priv->multi_packet = 1;
+               memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
+               return;
+       }
+
+       priv->multi_packet = 0;
+
+       left = packet[3] & 0x01;
+       right = packet[3] & 0x02;
+       middle = packet[3] & 0x04;
+
+       x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
+           ((packet[0] & 0x30) >> 4);
+       y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
+       z = packet[5] & 0x7f;
+
+       /*
+        * Sometimes the hardware sends a single packet with z = 0
+        * in the middle of a stream. Real releases generate packets
+        * with x, y, and z all zero, so these seem to be flukes.
+        * Ignore them.
+        */
+       if (x && y && !z)
+               return;
+
+       /*
+        * If we don't have MT data or the bitmaps were empty, we have
+        * to rely on ST data.
+        */
+       if (!fingers) {
+               x1 = x;
+               y1 = y;
+               fingers = z > 0 ? 1 : 0;
+       }
+
+       if (z >= 64)
+               input_report_key(dev, BTN_TOUCH, 1);
+       else
+               input_report_key(dev, BTN_TOUCH, 0);
+
+       alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+       input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+       input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+       input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+       input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+
+       input_report_key(dev, BTN_LEFT, left);
+       input_report_key(dev, BTN_RIGHT, right);
+       input_report_key(dev, BTN_MIDDLE, middle);
+
+       if (z > 0) {
+               input_report_abs(dev, ABS_X, x);
+               input_report_abs(dev, ABS_Y, y);
+       }
+       input_report_abs(dev, ABS_PRESSURE, z);
+
+       input_sync(dev);
+
+       if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
+               left = packet[3] & 0x10;
+               right = packet[3] & 0x20;
+               middle = packet[3] & 0x40;
+
+               input_report_key(dev2, BTN_LEFT, left);
+               input_report_key(dev2, BTN_RIGHT, right);
+               input_report_key(dev2, BTN_MIDDLE, middle);
+               input_sync(dev2);
+       }
+}
+
+static void alps_process_packet_v3(struct psmouse *psmouse)
+{
+       unsigned char *packet = psmouse->packet;
+
+       /*
+        * v3 protocol packets come in three types, two representing
+        * touchpad data and one representing trackstick data.
+        * Trackstick packets seem to be distinguished by always
+        * having 0x3f in the last byte. This value has never been
+        * observed in the last byte of either of the other types
+        * of packets.
+        */
+       if (packet[5] == 0x3f) {
+               alps_process_trackstick_packet_v3(psmouse);
+               return;
+       }
+
+       alps_process_touchpad_packet_v3(psmouse);
+}
+
+static void alps_process_packet_v4(struct psmouse *psmouse)
+{
+       unsigned char *packet = psmouse->packet;
+       struct input_dev *dev = psmouse->dev;
+       int x, y, z;
+       int left, right;
+
+       left = packet[4] & 0x01;
+       right = packet[4] & 0x02;
+
+       x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+           ((packet[0] & 0x30) >> 4);
+       y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
+       z = packet[5] & 0x7f;
+
+       if (z >= 64)
+               input_report_key(dev, BTN_TOUCH, 1);
+       else
+               input_report_key(dev, BTN_TOUCH, 0);
+
+       if (z > 0) {
+               input_report_abs(dev, ABS_X, x);
+               input_report_abs(dev, ABS_Y, y);
+       }
+       input_report_abs(dev, ABS_PRESSURE, z);
+
+       input_report_key(dev, BTN_TOOL_FINGER, z > 0);
+       input_report_key(dev, BTN_LEFT, left);
+       input_report_key(dev, BTN_RIGHT, right);
+
+       input_sync(dev);
+}
+
+static void alps_process_packet(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       const struct alps_model_info *model = priv->i;
+
+       switch (model->proto_version) {
+       case ALPS_PROTO_V1:
+       case ALPS_PROTO_V2:
+               alps_process_packet_v1_v2(psmouse);
+               break;
+       case ALPS_PROTO_V3:
+               alps_process_packet_v3(psmouse);
+               break;
+       case ALPS_PROTO_V4:
+               alps_process_packet_v4(psmouse);
+               break;
+       }
+}
+
 static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
                                        unsigned char packet[],
                                        bool report_buttons)
@@ -344,7 +759,7 @@ static void alps_flush_packet(unsigned long data)
 
        serio_pause_rx(psmouse->ps2dev.serio);
 
-       if (psmouse->pktcnt == 6) {
+       if (psmouse->pktcnt == psmouse->pktsize) {
 
                /*
                 * We did not any more data in reasonable amount of time.
@@ -395,8 +810,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
                return PSMOUSE_BAD_DATA;
        }
 
-       /* Bytes 2 - 6 should have 0 in the highest bit */
-       if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
+       /* Bytes 2 - pktsize should have 0 in the highest bit */
+       if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
            (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
                psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
                            psmouse->pktcnt - 1,
@@ -404,7 +819,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
                return PSMOUSE_BAD_DATA;
        }
 
-       if (psmouse->pktcnt == 6) {
+       if (psmouse->pktcnt == psmouse->pktsize) {
                alps_process_packet(psmouse);
                return PSMOUSE_FULL_PACKET;
        }
@@ -412,11 +827,127 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
        return PSMOUSE_GOOD_DATA;
 }
 
+static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       struct alps_data *priv = psmouse->private;
+       int command;
+       unsigned char *param;
+       unsigned char dummy[4];
+
+       BUG_ON(nibble > 0xf);
+
+       command = priv->nibble_commands[nibble].command;
+       param = (command & 0x0f00) ?
+               dummy : (unsigned char *)&priv->nibble_commands[nibble].data;
+
+       if (ps2_command(ps2dev, param, command))
+               return -1;
+
+       return 0;
+}
+
+static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       struct alps_data *priv = psmouse->private;
+       int i, nibble;
+
+       if (ps2_command(ps2dev, NULL, priv->addr_command))
+               return -1;
+
+       for (i = 12; i >= 0; i -= 4) {
+               nibble = (addr >> i) & 0xf;
+               if (alps_command_mode_send_nibble(psmouse, nibble))
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       unsigned char param[4];
+
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+               return -1;
+
+       /*
+        * The address being read is returned in the first two bytes
+        * of the result. Check that this address matches the expected
+        * address.
+        */
+       if (addr != ((param[0] << 8) | param[1]))
+               return -1;
+
+       return param[2];
+}
+
+static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+       if (alps_command_mode_set_addr(psmouse, addr))
+               return -1;
+       return __alps_command_mode_read_reg(psmouse, addr);
+}
+
+static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
+{
+       if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
+               return -1;
+       if (alps_command_mode_send_nibble(psmouse, value & 0xf))
+               return -1;
+       return 0;
+}
+
+static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
+                                      u8 value)
+{
+       if (alps_command_mode_set_addr(psmouse, addr))
+               return -1;
+       return __alps_command_mode_write_reg(psmouse, value);
+}
+
+static int alps_enter_command_mode(struct psmouse *psmouse,
+                                  unsigned char *resp)
+{
+       unsigned char param[4];
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+           ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+           ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+           ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+               psmouse_err(psmouse, "failed to enter command mode\n");
+               return -1;
+       }
+
+       if (param[0] != 0x88 && param[1] != 0x07) {
+               psmouse_dbg(psmouse,
+                           "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
+                           param[0], param[1], param[2]);
+               return -1;
+       }
+
+       if (resp)
+               *resp = param[2];
+       return 0;
+}
+
+static inline int alps_exit_command_mode(struct psmouse *psmouse)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
+               return -1;
+       return 0;
+}
+
 static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
 {
        struct ps2dev *ps2dev = &psmouse->ps2dev;
        static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
        unsigned char param[4];
+       const struct alps_model_info *model = NULL;
        int i;
 
        /*
@@ -464,12 +995,41 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
                *version = (param[0] << 8) | (param[1] << 4) | i;
        }
 
-       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
+       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
                if (!memcmp(param, alps_model_data[i].signature,
-                           sizeof(alps_model_data[i].signature)))
-                       return alps_model_data + i;
+                           sizeof(alps_model_data[i].signature))) {
+                       model = alps_model_data + i;
+                       break;
+               }
+       }
 
-       return NULL;
+       if (model && model->proto_version > ALPS_PROTO_V2) {
+               /*
+                * Need to check command mode response to identify
+                * model
+                */
+               model = NULL;
+               if (alps_enter_command_mode(psmouse, param)) {
+                       psmouse_warn(psmouse,
+                                    "touchpad failed to enter command mode\n");
+               } else {
+                       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+                               if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
+                                   alps_model_data[i].command_mode_resp == param[0]) {
+                                       model = alps_model_data + i;
+                                       break;
+                               }
+                       }
+                       alps_exit_command_mode(psmouse);
+
+                       if (!model)
+                               psmouse_dbg(psmouse,
+                                           "Unknown command mode response %2.2x\n",
+                                           param[0]);
+               }
+       }
+
+       return model;
 }
 
 /*
@@ -477,7 +1037,7 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
  * subsequent commands. It looks like glidepad is behind stickpointer,
  * I'd thought it would be other way around...
  */
-static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
 {
        struct ps2dev *ps2dev = &psmouse->ps2dev;
        int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
@@ -494,7 +1054,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
        return 0;
 }
 
-static int alps_absolute_mode(struct psmouse *psmouse)
+static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
 {
        struct ps2dev *ps2dev = &psmouse->ps2dev;
 
@@ -565,17 +1125,17 @@ static int alps_tap_mode(struct psmouse *psmouse, int enable)
 static int alps_poll(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
-       unsigned char buf[6];
+       unsigned char buf[sizeof(psmouse->packet)];
        bool poll_failed;
 
        if (priv->i->flags & ALPS_PASS)
-               alps_passthrough_mode(psmouse, true);
+               alps_passthrough_mode_v2(psmouse, true);
 
        poll_failed = ps2_command(&psmouse->ps2dev, buf,
                                  PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
 
        if (priv->i->flags & ALPS_PASS)
-               alps_passthrough_mode(psmouse, false);
+               alps_passthrough_mode_v2(psmouse, false);
 
        if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
                return -1;
@@ -592,13 +1152,13 @@ static int alps_poll(struct psmouse *psmouse)
        return 0;
 }
 
-static int alps_hw_init(struct psmouse *psmouse)
+static int alps_hw_init_v1_v2(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
        const struct alps_model_info *model = priv->i;
 
        if ((model->flags & ALPS_PASS) &&
-           alps_passthrough_mode(psmouse, true)) {
+           alps_passthrough_mode_v2(psmouse, true)) {
                return -1;
        }
 
@@ -607,13 +1167,13 @@ static int alps_hw_init(struct psmouse *psmouse)
                return -1;
        }
 
-       if (alps_absolute_mode(psmouse)) {
+       if (alps_absolute_mode_v1_v2(psmouse)) {
                psmouse_err(psmouse, "Failed to enable absolute mode\n");
                return -1;
        }
 
        if ((model->flags & ALPS_PASS) &&
-           alps_passthrough_mode(psmouse, false)) {
+           alps_passthrough_mode_v2(psmouse, false)) {
                return -1;
        }
 
@@ -626,6 +1186,297 @@ static int alps_hw_init(struct psmouse *psmouse)
        return 0;
 }
 
+/*
+ * Enable or disable passthrough mode to the trackstick. Must be in
+ * command mode when calling this function.
+ */
+static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+{
+       int reg_val;
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+       if (reg_val == -1)
+               return -1;
+
+       if (enable)
+               reg_val |= 0x01;
+       else
+               reg_val &= ~0x01;
+
+       if (__alps_command_mode_write_reg(psmouse, reg_val))
+               return -1;
+
+       return 0;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v3(struct psmouse *psmouse)
+{
+       int reg_val;
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+       if (reg_val == -1)
+               return -1;
+
+       reg_val |= 0x06;
+       if (__alps_command_mode_write_reg(psmouse, reg_val))
+               return -1;
+
+       return 0;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       int reg_val;
+       unsigned char param[4];
+
+       priv->nibble_commands = alps_v3_nibble_commands;
+       priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+
+       if (alps_enter_command_mode(psmouse, NULL))
+               goto error;
+
+       /* Check for trackstick */
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+       if (reg_val == -1)
+               goto error;
+       if (reg_val & 0x80) {
+               if (alps_passthrough_mode_v3(psmouse, true))
+                       goto error;
+               if (alps_exit_command_mode(psmouse))
+                       goto error;
+
+               /*
+                * E7 report for the trackstick
+                *
+                * There have been reports of failures which seem to trace back
+                * to the above trackstick check failing. When these occur
+                * this E7 report fails, so when that happens we continue
+                * with the assumption that there isn't a trackstick after
+                * all.
+                */
+               param[0] = 0x64;
+               if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+                   ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+                       psmouse_warn(psmouse, "trackstick E7 report failed\n");
+               } else {
+                       psmouse_dbg(psmouse,
+                                   "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+                                   param[0], param[1], param[2]);
+
+                       /*
+                        * Not sure what this does, but it is absolutely
+                        * essential. Without it, the touchpad does not
+                        * work at all and the trackstick just emits normal
+                        * PS/2 packets.
+                        */
+                       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                           ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                           ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                           alps_command_mode_send_nibble(psmouse, 0x9) ||
+                           alps_command_mode_send_nibble(psmouse, 0x4)) {
+                               psmouse_err(psmouse,
+                                           "Error sending magic E6 sequence\n");
+                               goto error_passthrough;
+                       }
+               }
+
+               if (alps_enter_command_mode(psmouse, NULL))
+                       goto error_passthrough;
+               if (alps_passthrough_mode_v3(psmouse, false))
+                       goto error;
+       }
+
+       if (alps_absolute_mode_v3(psmouse)) {
+               psmouse_err(psmouse, "Failed to enter absolute mode\n");
+               goto error;
+       }
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0006);
+       if (reg_val == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+               goto error;
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0007);
+       if (reg_val == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+               goto error;
+
+       if (alps_command_mode_read_reg(psmouse, 0x0144) == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, 0x04))
+               goto error;
+
+       if (alps_command_mode_read_reg(psmouse, 0x0159) == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, 0x03))
+               goto error;
+
+       if (alps_command_mode_read_reg(psmouse, 0x0163) == -1)
+               goto error;
+       if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03))
+               goto error;
+
+       if (alps_command_mode_read_reg(psmouse, 0x0162) == -1)
+               goto error;
+       if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
+               goto error;
+
+       /*
+        * This ensures the trackstick packets are in the format
+        * supported by this driver. If bit 1 isn't set the packet
+        * format is different.
+        */
+       if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
+               goto error;
+
+       alps_exit_command_mode(psmouse);
+
+       /* Set rate and enable data reporting */
+       param[0] = 0x64;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+           ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+               psmouse_err(psmouse, "Failed to enable data reporting\n");
+               return -1;
+       }
+
+       return 0;
+
+error_passthrough:
+       /* Something failed while in passthrough mode, so try to get out */
+       if (!alps_enter_command_mode(psmouse, NULL))
+               alps_passthrough_mode_v3(psmouse, false);
+error:
+       /*
+        * Leaving the touchpad in command mode will essentially render
+        * it unusable until the machine reboots, so exit it here just
+        * to be safe
+        */
+       alps_exit_command_mode(psmouse);
+       return -1;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v4(struct psmouse *psmouse)
+{
+       int reg_val;
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+       if (reg_val == -1)
+               return -1;
+
+       reg_val |= 0x02;
+       if (__alps_command_mode_write_reg(psmouse, reg_val))
+               return -1;
+
+       return 0;
+}
+
+static int alps_hw_init_v4(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       unsigned char param[4];
+
+       priv->nibble_commands = alps_v4_nibble_commands;
+       priv->addr_command = PSMOUSE_CMD_DISABLE;
+
+       if (alps_enter_command_mode(psmouse, NULL))
+               goto error;
+
+       if (alps_absolute_mode_v4(psmouse)) {
+               psmouse_err(psmouse, "Failed to enter absolute mode\n");
+               goto error;
+       }
+
+       if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03))
+               goto error;
+
+       alps_exit_command_mode(psmouse);
+
+       /*
+        * This sequence changes the output from a 9-byte to an
+        * 8-byte format. All the same data seems to be present,
+        * just in a more compact format.
+        */
+       param[0] = 0xc8;
+       param[1] = 0x64;
+       param[2] = 0x50;
+       if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+           ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) ||
+           ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) ||
+           ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
+               return -1;
+
+       /* Set rate and enable data reporting */
+       param[0] = 0x64;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+           ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+               psmouse_err(psmouse, "Failed to enable data reporting\n");
+               return -1;
+       }
+
+       return 0;
+
+error:
+       /*
+        * Leaving the touchpad in command mode will essentially render
+        * it unusable until the machine reboots, so exit it here just
+        * to be safe
+        */
+       alps_exit_command_mode(psmouse);
+       return -1;
+}
+
+static int alps_hw_init(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       const struct alps_model_info *model = priv->i;
+       int ret = -1;
+
+       switch (model->proto_version) {
+       case ALPS_PROTO_V1:
+       case ALPS_PROTO_V2:
+               ret = alps_hw_init_v1_v2(psmouse);
+               break;
+       case ALPS_PROTO_V3:
+               ret = alps_hw_init_v3(psmouse);
+               break;
+       case ALPS_PROTO_V4:
+               ret = alps_hw_init_v4(psmouse);
+               break;
+       }
+
+       return ret;
+}
+
 static int alps_reconnect(struct psmouse *psmouse)
 {
        const struct alps_model_info *model;
@@ -666,6 +1517,8 @@ int alps_init(struct psmouse *psmouse)
 
        psmouse->private = priv;
 
+       psmouse_reset(psmouse);
+
        model = alps_get_model(psmouse, &version);
        if (!model)
                goto init_fail;
@@ -693,8 +1546,29 @@ int alps_init(struct psmouse *psmouse)
                BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
 
        dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
-       input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
-       input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+
+       switch (model->proto_version) {
+       case ALPS_PROTO_V1:
+       case ALPS_PROTO_V2:
+               input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+               input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+               break;
+       case ALPS_PROTO_V3:
+               set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+               input_mt_init_slots(dev1, 2);
+               input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
+               input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+
+               set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+               set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+               set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+               /* fall through */
+       case ALPS_PROTO_V4:
+               input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
+               input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+               break;
+       }
+
        input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
 
        if (model->flags & ALPS_WHEEL) {
@@ -737,7 +1611,7 @@ int alps_init(struct psmouse *psmouse)
        psmouse->poll = alps_poll;
        psmouse->disconnect = alps_disconnect;
        psmouse->reconnect = alps_reconnect;
-       psmouse->pktsize = 6;
+       psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
 
        /* We are having trouble resyncing ALPS touchpads so disable it for now */
        psmouse->resync_time = 0;
index 904ed8b3c8becf2de0dd41518658ee8888b452f1..a00a4ab92a0f7ca2050fa5a3d8b5c85fb142c233 100644 (file)
 #ifndef _ALPS_H
 #define _ALPS_H
 
+#define ALPS_PROTO_V1  0
+#define ALPS_PROTO_V2  1
+#define ALPS_PROTO_V3  2
+#define ALPS_PROTO_V4  3
+
 struct alps_model_info {
         unsigned char signature[3];
+       unsigned char command_mode_resp; /* v3/v4 only */
+       unsigned char proto_version;
         unsigned char byte0, mask0;
         unsigned char flags;
 };
 
+struct alps_nibble_commands {
+       int command;
+       unsigned char data;
+};
+
 struct alps_data {
        struct input_dev *dev2;         /* Relative device */
        char phys[32];                  /* Phys */
        const struct alps_model_info *i;/* Info */
+       const struct alps_nibble_commands *nibble_commands;
+       int addr_command;               /* Command to set register address */
        int prev_fin;                   /* Finger bit from previous packet */
+       int multi_packet;               /* Multi-packet data in progress */
+       unsigned char multi_data[6];    /* Saved multi-packet data */
+       u8 quirks;
        struct timer_list timer;
 };
 
+#define ALPS_QUIRK_TRACKSTICK_BUTTONS  1 /* trackstick buttons in trackstick packet */
+
 #ifdef CONFIG_MOUSE_PS2_ALPS
 int alps_detect(struct psmouse *psmouse, bool set_properties);
 int alps_init(struct psmouse *psmouse);
index ff5f61a0fd3ad1eb071e600e5e37fb15fb4ca61a..39be7b82c046ac9b9129678631077aa5cf5e37e8 100644 (file)
@@ -140,25 +140,13 @@ static int __exit amimouse_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver amimouse_driver = {
+       .probe = amimouse_probe,
        .remove = __exit_p(amimouse_remove),
        .driver   = {
                .name   = "amiga-mouse",
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init amimouse_init(void)
-{
-       return platform_driver_probe(&amimouse_driver, amimouse_probe);
-}
-
-module_init(amimouse_init);
-
-static void __exit amimouse_exit(void)
-{
-       platform_driver_unregister(&amimouse_driver);
-}
-
-module_exit(amimouse_exit);
+module_platform_driver(amimouse_driver);
 
 MODULE_ALIAS("platform:amiga-mouse");
index e2a9867c19d52fce53cac578bdaf265d3211bba1..d2c0db159b18dfc364c2ff7fc036863ef2de9614 100644 (file)
@@ -42,6 +42,24 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
        return 0;
 }
 
+/*
+ * V3 and later support this fast command
+ */
+static int elantech_send_cmd(struct psmouse *psmouse, unsigned char c,
+                               unsigned char *param)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+       if (ps2_command(ps2dev, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+           ps2_command(ps2dev, NULL, c) ||
+           ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+               psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c);
+               return -1;
+       }
+
+       return 0;
+}
+
 /*
  * A retrying version of ps2_command
  */
@@ -863,13 +881,13 @@ static int elantech_set_range(struct psmouse *psmouse,
                        i = (etd->fw_version > 0x020800 &&
                             etd->fw_version < 0x020900) ? 1 : 2;
 
-                       if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+                       if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
                                return -1;
 
                        fixed_dpi = param[1] & 0x10;
 
                        if (((etd->fw_version >> 16) == 0x14) && fixed_dpi) {
-                               if (synaptics_send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
+                               if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
                                        return -1;
 
                                *x_max = (etd->capabilities[1] - i) * param[1] / 2;
@@ -888,7 +906,7 @@ static int elantech_set_range(struct psmouse *psmouse,
                break;
 
        case 3:
-               if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+               if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
                        return -1;
 
                *x_max = (0x0f & param[0]) << 8 | param[1];
@@ -896,7 +914,7 @@ static int elantech_set_range(struct psmouse *psmouse,
                break;
 
        case 4:
-               if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+               if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
                        return -1;
 
                *x_max = (0x0f & param[0]) << 8 | param[1];
@@ -912,6 +930,30 @@ static int elantech_set_range(struct psmouse *psmouse,
        return 0;
 }
 
+/*
+ * (value from firmware) * 10 + 790 = dpi
+ * we also have to convert dpi to dots/mm (*10/254 to avoid floating point)
+ */
+static unsigned int elantech_convert_res(unsigned int val)
+{
+       return (val * 10 + 790) * 10 / 254;
+}
+
+static int elantech_get_resolution_v4(struct psmouse *psmouse,
+                                     unsigned int *x_res,
+                                     unsigned int *y_res)
+{
+       unsigned char param[3];
+
+       if (elantech_send_cmd(psmouse, ETP_RESOLUTION_QUERY, param))
+               return -1;
+
+       *x_res = elantech_convert_res(param[1] & 0x0f);
+       *y_res = elantech_convert_res((param[1] & 0xf0) >> 4);
+
+       return 0;
+}
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -920,6 +962,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        struct input_dev *dev = psmouse->dev;
        struct elantech_data *etd = psmouse->private;
        unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
+       unsigned int x_res = 0, y_res = 0;
 
        if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
                return -1;
@@ -967,10 +1010,20 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                break;
 
        case 4:
+               if (elantech_get_resolution_v4(psmouse, &x_res, &y_res)) {
+                       /*
+                        * if query failed, print a warning and leave the values
+                        * zero to resemble synaptics.c behavior.
+                        */
+                       psmouse_warn(psmouse, "couldn't query resolution data.\n");
+               }
+
                __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
                /* For X to recognize me as touchpad. */
                input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+               input_abs_set_res(dev, ABS_X, x_res);
+               input_abs_set_res(dev, ABS_Y, y_res);
                /*
                 * range of pressure and width is the same as v2,
                 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -983,6 +1036,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                input_mt_init_slots(dev, ETP_MAX_FINGERS);
                input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+               input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+               input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
                input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
                                     ETP_PMAX_V2, 0, 0);
                /*
@@ -1031,16 +1086,13 @@ static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
        struct elantech_data *etd = psmouse->private;
        struct elantech_attr_data *attr = data;
        unsigned char *reg = (unsigned char *) etd + attr->field_offset;
-       unsigned long value;
+       unsigned char value;
        int err;
 
-       err = strict_strtoul(buf, 16, &value);
+       err = kstrtou8(buf, 16, &value);
        if (err)
                return err;
 
-       if (value > 0xff)
-               return -EINVAL;
-
        /* Do we need to preserve some bits for version 2 hardware too? */
        if (etd->hw_version == 1) {
                if (attr->reg == 0x10)
@@ -1233,9 +1285,11 @@ static int elantech_set_properties(struct elantech_data *etd)
                }
        }
 
-       /*
-        * Turn on packet checking by default.
-        */
+       /* decide which send_cmd we're gonna use early */
+       etd->send_cmd = etd->hw_version >= 3 ? elantech_send_cmd :
+                                              synaptics_send_cmd;
+
+       /* Turn on packet checking by default */
        etd->paritycheck = 1;
 
        /*
@@ -1291,7 +1345,7 @@ int elantech_init(struct psmouse *psmouse)
                     "assuming hardware version %d (with firmware version 0x%02x%02x%02x)\n",
                     etd->hw_version, param[0], param[1], param[2]);
 
-       if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
+       if (etd->send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
            etd->capabilities)) {
                psmouse_err(psmouse, "failed to query capabilities.\n");
                goto init_fail;
index 9e5f1aabea7e92c2abc48fe7de380c17279873be..46db3be45ac988710a821450bd37f5127bfc90c3 100644 (file)
@@ -20,6 +20,7 @@
 #define ETP_FW_VERSION_QUERY           0x01
 #define ETP_CAPABILITIES_QUERY         0x02
 #define ETP_SAMPLE_QUERY               0x03
+#define ETP_RESOLUTION_QUERY           0x04
 
 /*
  * Command values for register reading or writing
@@ -135,6 +136,7 @@ struct elantech_data {
        unsigned int width;
        struct finger_pos mt[ETP_MAX_FINGERS];
        unsigned char parity[256];
+       int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
 };
 
 #ifdef CONFIG_MOUSE_PS2_ELANTECH
index 58902fbb98967a7b456fa71d0155ee65b51003bd..a9ad8e1402be217786574e88146d6e5d652a1e8e 100644 (file)
@@ -178,18 +178,7 @@ static struct platform_driver gpio_mouse_device_driver = {
                .owner  = THIS_MODULE,
        }
 };
-
-static int __init gpio_mouse_init(void)
-{
-       return platform_driver_register(&gpio_mouse_device_driver);
-}
-module_init(gpio_mouse_init);
-
-static void __exit gpio_mouse_exit(void)
-{
-       platform_driver_unregister(&gpio_mouse_device_driver);
-}
-module_exit(gpio_mouse_exit);
+module_platform_driver(gpio_mouse_device_driver);
 
 MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("GPIO mouse driver");
index 0470dd46b566c99faa8c8aaa94f2795ad25e58dc..1c5d521de600b23c0d44008f057f77ad40e3bc44 100644 (file)
@@ -789,11 +789,14 @@ static ssize_t hgpk_set_powered(struct psmouse *psmouse, void *data,
                                const char *buf, size_t count)
 {
        struct hgpk_data *priv = psmouse->private;
-       unsigned long value;
+       unsigned int value;
        int err;
 
-       err = strict_strtoul(buf, 10, &value);
-       if (err || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        if (value != priv->powered) {
@@ -881,11 +884,14 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
                                const char *buf, size_t count)
 {
        struct hgpk_data *priv = psmouse->private;
-       unsigned long value;
+       unsigned int value;
        int err;
 
-       err = strict_strtoul(buf, 10, &value);
-       if (err || value != 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value != 1)
                return -EINVAL;
 
        /*
index faac2c3bef740e739a59e65c3633b5bf9d262399..84de2fc6acc1c4a163a2b5fed4c45a34572fe4cb 100644 (file)
@@ -155,9 +155,14 @@ static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse,
 static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data,
                                          const char *buf, size_t count)
 {
-       unsigned long value;
+       unsigned int value;
+       int err;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
                return -EINVAL;
 
        ps2pp_set_smartscroll(psmouse, value);
index 9f352fbd7b4f8342478b90c5b52c7cfd44be83af..de7e8bc17b1f053906bfea7811bd791ed38dcd96 100644 (file)
@@ -127,7 +127,7 @@ struct psmouse_protocol {
  * relevant events to the input module once full packet has arrived.
  */
 
-static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
 {
        struct input_dev *dev = psmouse->dev;
        unsigned char *packet = psmouse->packet;
@@ -418,6 +418,49 @@ int psmouse_reset(struct psmouse *psmouse)
        return 0;
 }
 
+/*
+ * Here we set the mouse resolution.
+ */
+
+void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+       static const unsigned char params[] = { 0, 1, 2, 2, 3 };
+       unsigned char p;
+
+       if (resolution == 0 || resolution > 200)
+               resolution = 200;
+
+       p = params[resolution / 50];
+       ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
+       psmouse->resolution = 25 << p;
+}
+
+/*
+ * Here we set the mouse report rate.
+ */
+
+static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+       static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
+       unsigned char r;
+       int i = 0;
+
+       while (rates[i] > rate) i++;
+       r = rates[i];
+       ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
+       psmouse->rate = r;
+}
+
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+       return ps2_command(&psmouse->ps2dev, psmouse->packet,
+                          PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
+
 
 /*
  * Genius NetMouse magic init.
@@ -602,6 +645,56 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
        return 0;
 }
 
+/*
+ * Apply default settings to the psmouse structure. Most of them will
+ * be overridden by individual protocol initialization routines.
+ */
+
+static void psmouse_apply_defaults(struct psmouse *psmouse)
+{
+       struct input_dev *input_dev = psmouse->dev;
+
+       memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
+       memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
+       memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
+       memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
+       memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));
+
+       __set_bit(EV_KEY, input_dev->evbit);
+       __set_bit(EV_REL, input_dev->evbit);
+
+       __set_bit(BTN_LEFT, input_dev->keybit);
+       __set_bit(BTN_RIGHT, input_dev->keybit);
+
+       __set_bit(REL_X, input_dev->relbit);
+       __set_bit(REL_Y, input_dev->relbit);
+
+       psmouse->set_rate = psmouse_set_rate;
+       psmouse->set_resolution = psmouse_set_resolution;
+       psmouse->poll = psmouse_poll;
+       psmouse->protocol_handler = psmouse_process_byte;
+       psmouse->pktsize = 3;
+       psmouse->reconnect = NULL;
+       psmouse->disconnect = NULL;
+       psmouse->cleanup = NULL;
+       psmouse->pt_activate = NULL;
+       psmouse->pt_deactivate = NULL;
+}
+
+/*
+ * Apply default settings to the psmouse structure and call specified
+ * protocol detection or initialization routine.
+ */
+static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
+                                          bool set_properties),
+                            struct psmouse *psmouse, bool set_properties)
+{
+       if (set_properties)
+               psmouse_apply_defaults(psmouse);
+
+       return detect(psmouse, set_properties);
+}
+
 /*
  * psmouse_extensions() probes for any extensions to the basic PS/2 protocol
  * the mouse may have.
@@ -616,7 +709,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * We always check for lifebook because it does not disturb mouse
  * (it only checks DMI information).
  */
-       if (lifebook_detect(psmouse, set_properties) == 0) {
+       if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
                if (max_proto > PSMOUSE_IMEX) {
                        if (!set_properties || lifebook_init(psmouse) == 0)
                                return PSMOUSE_LIFEBOOK;
@@ -628,15 +721,18 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * upsets the thinkingmouse).
  */
 
-       if (max_proto > PSMOUSE_IMEX && thinking_detect(psmouse, set_properties) == 0)
+       if (max_proto > PSMOUSE_IMEX &&
+           psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
                return PSMOUSE_THINKPS;
+       }
 
 /*
  * Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
  * support is disabled in config - we need to know if it is synaptics so we
  * can reset it properly after probing for intellimouse.
  */
-       if (max_proto > PSMOUSE_PS2 && synaptics_detect(psmouse, set_properties) == 0) {
+       if (max_proto > PSMOUSE_PS2 &&
+           psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
                synaptics_hardware = true;
 
                if (max_proto > PSMOUSE_IMEX) {
@@ -667,7 +763,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
  */
        if (max_proto > PSMOUSE_IMEX) {
                ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
-               if (alps_detect(psmouse, set_properties) == 0) {
+               if (psmouse_do_detect(alps_detect,
+                                     psmouse, set_properties) == 0) {
                        if (!set_properties || alps_init(psmouse) == 0)
                                return PSMOUSE_ALPS;
 /*
@@ -681,7 +778,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Try OLPC HGPK touchpad.
  */
        if (max_proto > PSMOUSE_IMEX &&
-                       hgpk_detect(psmouse, set_properties) == 0) {
+           psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
                if (!set_properties || hgpk_init(psmouse) == 0)
                        return PSMOUSE_HGPK;
 /*
@@ -694,7 +791,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Try Elantech touchpad.
  */
        if (max_proto > PSMOUSE_IMEX &&
-                       elantech_detect(psmouse, set_properties) == 0) {
+           psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
                if (!set_properties || elantech_init(psmouse) == 0)
                        return PSMOUSE_ELANTECH;
 /*
@@ -703,18 +800,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
                max_proto = PSMOUSE_IMEX;
        }
 
-
        if (max_proto > PSMOUSE_IMEX) {
-               if (genius_detect(psmouse, set_properties) == 0)
+               if (psmouse_do_detect(genius_detect,
+                                     psmouse, set_properties) == 0)
                        return PSMOUSE_GENPS;
 
-               if (ps2pp_init(psmouse, set_properties) == 0)
+               if (psmouse_do_detect(ps2pp_init,
+                                     psmouse, set_properties) == 0)
                        return PSMOUSE_PS2PP;
 
-               if (trackpoint_detect(psmouse, set_properties) == 0)
+               if (psmouse_do_detect(trackpoint_detect,
+                                     psmouse, set_properties) == 0)
                        return PSMOUSE_TRACKPOINT;
 
-               if (touchkit_ps2_detect(psmouse, set_properties) == 0)
+               if (psmouse_do_detect(touchkit_ps2_detect,
+                                     psmouse, set_properties) == 0)
                        return PSMOUSE_TOUCHKIT_PS2;
        }
 
@@ -723,7 +823,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Trackpoint devices (causing TP_READ_ID command to time out).
  */
        if (max_proto > PSMOUSE_IMEX) {
-               if (fsp_detect(psmouse, set_properties) == 0) {
+               if (psmouse_do_detect(fsp_detect,
+                                     psmouse, set_properties) == 0) {
                        if (!set_properties || fsp_init(psmouse) == 0)
                                return PSMOUSE_FSP;
 /*
@@ -741,17 +842,23 @@ static int psmouse_extensions(struct psmouse *psmouse,
        ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
        psmouse_reset(psmouse);
 
-       if (max_proto >= PSMOUSE_IMEX && im_explorer_detect(psmouse, set_properties) == 0)
+       if (max_proto >= PSMOUSE_IMEX &&
+           psmouse_do_detect(im_explorer_detect,
+                             psmouse, set_properties) == 0) {
                return PSMOUSE_IMEX;
+       }
 
-       if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0)
+       if (max_proto >= PSMOUSE_IMPS &&
+           psmouse_do_detect(intellimouse_detect,
+                             psmouse, set_properties) == 0) {
                return PSMOUSE_IMPS;
+       }
 
 /*
  * Okay, all failed, we have a standard mouse here. The number of the buttons
  * is still a question, though. We assume 3.
  */
-       ps2bare_detect(psmouse, set_properties);
+       psmouse_do_detect(ps2bare_detect, psmouse, set_properties);
 
        if (synaptics_hardware) {
 /*
@@ -819,6 +926,13 @@ static const struct psmouse_protocol psmouse_protocols[] = {
                .detect         = synaptics_detect,
                .init           = synaptics_init,
        },
+       {
+               .type           = PSMOUSE_SYNAPTICS_RELATIVE,
+               .name           = "SynRelPS/2",
+               .alias          = "synaptics-relative",
+               .detect         = synaptics_detect,
+               .init           = synaptics_init_relative,
+       },
 #endif
 #ifdef CONFIG_MOUSE_PS2_ALPS
        {
@@ -957,39 +1071,6 @@ static int psmouse_probe(struct psmouse *psmouse)
        return 0;
 }
 
-/*
- * Here we set the mouse resolution.
- */
-
-void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
-{
-       static const unsigned char params[] = { 0, 1, 2, 2, 3 };
-       unsigned char p;
-
-       if (resolution == 0 || resolution > 200)
-               resolution = 200;
-
-       p = params[resolution / 50];
-       ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
-       psmouse->resolution = 25 << p;
-}
-
-/*
- * Here we set the mouse report rate.
- */
-
-static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
-{
-       static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
-       unsigned char r;
-       int i = 0;
-
-       while (rates[i] > rate) i++;
-       r = rates[i];
-       ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
-       psmouse->rate = r;
-}
-
 /*
  * psmouse_initialize() initializes the mouse to a sane state.
  */
@@ -1035,16 +1116,6 @@ static void psmouse_deactivate(struct psmouse *psmouse)
        psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 }
 
-/*
- * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
- */
-
-static int psmouse_poll(struct psmouse *psmouse)
-{
-       return ps2_command(&psmouse->ps2dev, psmouse->packet,
-                          PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
-}
-
 
 /*
  * psmouse_resync() attempts to re-validate current protocol.
@@ -1245,18 +1316,9 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
 
        input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-       input_dev->keybit[BIT_WORD(BTN_MOUSE)] =
-                               BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
-       input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-
-       psmouse->set_rate = psmouse_set_rate;
-       psmouse->set_resolution = psmouse_set_resolution;
-       psmouse->poll = psmouse_poll;
-       psmouse->protocol_handler = psmouse_process_byte;
-       psmouse->pktsize = 3;
-
        if (proto && (proto->detect || proto->init)) {
+               psmouse_apply_defaults(psmouse);
+
                if (proto->detect && proto->detect(psmouse, true) < 0)
                        return -1;
 
@@ -1558,13 +1620,12 @@ static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char
 static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
 {
        unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
-       unsigned long value;
-
-       if (strict_strtoul(buf, 10, &value))
-               return -EINVAL;
+       unsigned int value;
+       int err;
 
-       if ((unsigned int)value != value)
-               return -EINVAL;
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
 
        *field = value;
 
@@ -1671,10 +1732,12 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
 
 static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
-       unsigned long value;
+       unsigned int value;
+       int err;
 
-       if (strict_strtoul(buf, 10, &value))
-               return -EINVAL;
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
 
        psmouse->set_rate(psmouse, value);
        return count;
@@ -1682,10 +1745,12 @@ static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const
 
 static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
-       unsigned long value;
+       unsigned int value;
+       int err;
 
-       if (strict_strtoul(buf, 10, &value))
-               return -EINVAL;
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
 
        psmouse->set_resolution(psmouse, value);
        return count;
index 9b84b0c4e37189fcd9db65599343cfe4f0f0a806..6a417092d010f7afd62f0a95565ad008fb57f6b6 100644 (file)
@@ -8,6 +8,7 @@
 #define PSMOUSE_CMD_SETSTREAM  0x00ea
 #define PSMOUSE_CMD_SETPOLL    0x00f0
 #define PSMOUSE_CMD_POLL       0x00eb  /* caller sets number of bytes to receive */
+#define PSMOUSE_CMD_RESET_WRAP 0x00ec
 #define PSMOUSE_CMD_GETID      0x02f2
 #define PSMOUSE_CMD_SETRATE    0x10f3
 #define PSMOUSE_CMD_ENABLE     0x00f4
@@ -93,6 +94,7 @@ enum psmouse_type {
        PSMOUSE_HGPK,
        PSMOUSE_ELANTECH,
        PSMOUSE_FSP,
+       PSMOUSE_SYNAPTICS_RELATIVE,
        PSMOUSE_AUTO            /* This one should always be last */
 };
 
@@ -102,6 +104,7 @@ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
 int psmouse_reset(struct psmouse *psmouse);
 void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state);
 void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse);
 
 struct psmouse_attribute {
        struct device_attribute dattr;
index ee3b0ca9d592b05286ff44dd2b90fadb8b07b193..a9e4bfdf31f4e5906a5b3d5b5a975d315b4f25bc 100644 (file)
@@ -250,19 +250,7 @@ static struct platform_driver pxa930_trkball_driver = {
        .probe          = pxa930_trkball_probe,
        .remove         = __devexit_p(pxa930_trkball_remove),
 };
-
-static int __init pxa930_trkball_init(void)
-{
-       return platform_driver_register(&pxa930_trkball_driver);
-}
-
-static void __exit pxa930_trkball_exit(void)
-{
-       platform_driver_unregister(&pxa930_trkball_driver);
-}
-
-module_init(pxa930_trkball_init);
-module_exit(pxa930_trkball_exit);
+module_platform_driver(pxa930_trkball_driver);
 
 MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
 MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
index 86d6f39178b0d556364df951ca33575598bfa416..e36847de7617cfdd7cc75af410afad11c46e56f8 100644 (file)
@@ -408,7 +408,7 @@ static int fsp_onpad_hscr(struct psmouse *psmouse, bool enable)
 static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
                                   const char *buf, size_t count)
 {
-       unsigned long reg, val;
+       int reg, val;
        char *rest;
        ssize_t retval;
 
@@ -416,7 +416,11 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
        if (rest == buf || *rest != ' ' || reg > 0xff)
                return -EINVAL;
 
-       if (strict_strtoul(rest + 1, 16, &val) || val > 0xff)
+       retval = kstrtoint(rest + 1, 16, &val);
+       if (retval)
+               return retval;
+
+       if (val > 0xff)
                return -EINVAL;
 
        if (fsp_reg_write_enable(psmouse, true))
@@ -448,10 +452,13 @@ static ssize_t fsp_attr_set_getreg(struct psmouse *psmouse, void *data,
                                        const char *buf, size_t count)
 {
        struct fsp_data *pad = psmouse->private;
-       unsigned long reg;
-       int val;
+       int reg, val, err;
+
+       err = kstrtoint(buf, 16, &reg);
+       if (err)
+               return err;
 
-       if (strict_strtoul(buf, 16, &reg) || reg > 0xff)
+       if (reg > 0xff)
                return -EINVAL;
 
        if (fsp_reg_read(psmouse, reg, &val))
@@ -480,9 +487,13 @@ static ssize_t fsp_attr_show_pagereg(struct psmouse *psmouse,
 static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
                                        const char *buf, size_t count)
 {
-       unsigned long val;
+       int val, err;
 
-       if (strict_strtoul(buf, 16, &val) || val > 0xff)
+       err = kstrtoint(buf, 16, &val);
+       if (err)
+               return err;
+
+       if (val > 0xff)
                return -EINVAL;
 
        if (fsp_page_reg_write(psmouse, val))
@@ -505,9 +516,14 @@ static ssize_t fsp_attr_show_vscroll(struct psmouse *psmouse,
 static ssize_t fsp_attr_set_vscroll(struct psmouse *psmouse, void *data,
                                        const char *buf, size_t count)
 {
-       unsigned long val;
+       unsigned int val;
+       int err;
+
+       err = kstrtouint(buf, 10, &val);
+       if (err)
+               return err;
 
-       if (strict_strtoul(buf, 10, &val) || val > 1)
+       if (val > 1)
                return -EINVAL;
 
        fsp_onpad_vscr(psmouse, val);
@@ -529,9 +545,14 @@ static ssize_t fsp_attr_show_hscroll(struct psmouse *psmouse,
 static ssize_t fsp_attr_set_hscroll(struct psmouse *psmouse, void *data,
                                        const char *buf, size_t count)
 {
-       unsigned long val;
+       unsigned int val;
+       int err;
+
+       err = kstrtouint(buf, 10, &val);
+       if (err)
+               return err;
 
-       if (strict_strtoul(buf, 10, &val) || val > 1)
+       if (val > 1)
                return -EINVAL;
 
        fsp_onpad_hscr(psmouse, val);
index a6dcd18e9adf93b5b97cd00e04b6419a6788e1e5..8081a0a5d602c0b9f04557b5323882c4bcf89694 100644 (file)
@@ -269,19 +269,49 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
        return 0;
 }
 
-static int synaptics_set_absolute_mode(struct psmouse *psmouse)
+static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
+{
+       static unsigned char param = 0xc8;
+       struct synaptics_data *priv = psmouse->private;
+
+       if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+               return 0;
+
+       if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
+               return -1;
+
+       if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
+               return -1;
+
+       /* Advanced gesture mode also sends multi finger data */
+       priv->capabilities |= BIT(1);
+
+       return 0;
+}
+
+static int synaptics_set_mode(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
 
-       priv->mode = SYN_BIT_ABSOLUTE_MODE;
-       if (SYN_ID_MAJOR(priv->identity) >= 4)
+       priv->mode = 0;
+       if (priv->absolute_mode)
+               priv->mode |= SYN_BIT_ABSOLUTE_MODE;
+       if (priv->disable_gesture)
                priv->mode |= SYN_BIT_DISABLE_GESTURE;
+       if (psmouse->rate >= 80)
+               priv->mode |= SYN_BIT_HIGH_RATE;
        if (SYN_CAP_EXTENDED(priv->capabilities))
                priv->mode |= SYN_BIT_W_MODE;
 
        if (synaptics_mode_cmd(psmouse, priv->mode))
                return -1;
 
+       if (priv->absolute_mode &&
+           synaptics_set_advanced_gesture_mode(psmouse)) {
+               psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+               return -1;
+       }
+
        return 0;
 }
 
@@ -300,26 +330,6 @@ static void synaptics_set_rate(struct psmouse *psmouse, unsigned int rate)
        synaptics_mode_cmd(psmouse, priv->mode);
 }
 
-static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
-{
-       static unsigned char param = 0xc8;
-       struct synaptics_data *priv = psmouse->private;
-
-       if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
-                       SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
-               return 0;
-
-       if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
-               return -1;
-       if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
-               return -1;
-
-       /* Advanced gesture mode also sends multi finger data */
-       priv->capabilities |= BIT(1);
-
-       return 0;
-}
-
 /*****************************************************************************
  *     Synaptics pass-through PS/2 port support
  ****************************************************************************/
@@ -1143,8 +1153,24 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
 {
        int i;
 
+       /* Things that apply to both modes */
        __set_bit(INPUT_PROP_POINTER, dev->propbit);
+       __set_bit(EV_KEY, dev->evbit);
+       __set_bit(BTN_LEFT, dev->keybit);
+       __set_bit(BTN_RIGHT, dev->keybit);
 
+       if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
+               __set_bit(BTN_MIDDLE, dev->keybit);
+
+       if (!priv->absolute_mode) {
+               /* Relative mode */
+               __set_bit(EV_REL, dev->evbit);
+               __set_bit(REL_X, dev->relbit);
+               __set_bit(REL_Y, dev->relbit);
+               return;
+       }
+
+       /* Absolute mode */
        __set_bit(EV_ABS, dev->evbit);
        set_abs_position_params(dev, priv, ABS_X, ABS_Y);
        input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
@@ -1170,20 +1196,14 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
        if (SYN_CAP_PALMDETECT(priv->capabilities))
                input_set_abs_params(dev, ABS_TOOL_WIDTH, 0, 15, 0, 0);
 
-       __set_bit(EV_KEY, dev->evbit);
        __set_bit(BTN_TOUCH, dev->keybit);
        __set_bit(BTN_TOOL_FINGER, dev->keybit);
-       __set_bit(BTN_LEFT, dev->keybit);
-       __set_bit(BTN_RIGHT, dev->keybit);
 
        if (SYN_CAP_MULTIFINGER(priv->capabilities)) {
                __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
                __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
        }
 
-       if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
-               __set_bit(BTN_MIDDLE, dev->keybit);
-
        if (SYN_CAP_FOUR_BUTTON(priv->capabilities) ||
            SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
                __set_bit(BTN_FORWARD, dev->keybit);
@@ -1205,10 +1225,58 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
        }
 }
 
+static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse,
+                                             void *data, char *buf)
+{
+       struct synaptics_data *priv = psmouse->private;
+
+       return sprintf(buf, "%c\n", priv->disable_gesture ? '1' : '0');
+}
+
+static ssize_t synaptics_set_disable_gesture(struct psmouse *psmouse,
+                                            void *data, const char *buf,
+                                            size_t len)
+{
+       struct synaptics_data *priv = psmouse->private;
+       unsigned int value;
+       int err;
+
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+
+       if (value > 1)
+               return -EINVAL;
+
+       if (value == priv->disable_gesture)
+               return len;
+
+       priv->disable_gesture = value;
+       if (value)
+               priv->mode |= SYN_BIT_DISABLE_GESTURE;
+       else
+               priv->mode &= ~SYN_BIT_DISABLE_GESTURE;
+
+       if (synaptics_mode_cmd(psmouse, priv->mode))
+               return -EIO;
+
+       return len;
+}
+
+PSMOUSE_DEFINE_ATTR(disable_gesture, S_IWUSR | S_IRUGO, NULL,
+                   synaptics_show_disable_gesture,
+                   synaptics_set_disable_gesture);
+
 static void synaptics_disconnect(struct psmouse *psmouse)
 {
+       struct synaptics_data *priv = psmouse->private;
+
+       if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity))
+               device_remove_file(&psmouse->ps2dev.serio->dev,
+                                  &psmouse_attr_disable_gesture.dattr);
+
        synaptics_reset(psmouse);
-       kfree(psmouse->private);
+       kfree(priv);
        psmouse->private = NULL;
 }
 
@@ -1245,17 +1313,11 @@ static int synaptics_reconnect(struct psmouse *psmouse)
                return -1;
        }
 
-       if (synaptics_set_absolute_mode(psmouse)) {
+       if (synaptics_set_mode(psmouse)) {
                psmouse_err(psmouse, "Unable to initialize device.\n");
                return -1;
        }
 
-       if (synaptics_set_advanced_gesture_mode(psmouse)) {
-               psmouse_err(psmouse,
-                           "Advanced gesture mode reconnect failed.\n");
-               return -1;
-       }
-
        if (old_priv.identity != priv->identity ||
            old_priv.model_id != priv->model_id ||
            old_priv.capabilities != priv->capabilities ||
@@ -1332,20 +1394,18 @@ void __init synaptics_module_init(void)
        broken_olpc_ec = dmi_check_system(olpc_dmi_table);
 }
 
-int synaptics_init(struct psmouse *psmouse)
+static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
 {
        struct synaptics_data *priv;
+       int err = -1;
 
        /*
-        * The OLPC XO has issues with Synaptics' absolute mode; similarly to
-        * the HGPK, it quickly degrades and the hardware becomes jumpy and
-        * overly sensitive.  Not only that, but the constant packet spew
-        * (even at a lowered 40pps rate) overloads the EC such that key
-        * presses on the keyboard are missed.  Given all of that, don't
-        * even attempt to use Synaptics mode.  Relative mode seems to work
-        * just fine.
+        * The OLPC XO has issues with Synaptics' absolute mode; the constant
+        * packet spew overloads the EC such that key presses on the keyboard
+        * are missed.  Given that, don't even attempt to use Absolute mode.
+        * Relative mode seems to work just fine.
         */
-       if (broken_olpc_ec) {
+       if (absolute_mode && broken_olpc_ec) {
                psmouse_info(psmouse,
                             "OLPC XO detected, not enabling Synaptics protocol.\n");
                return -ENODEV;
@@ -1362,13 +1422,12 @@ int synaptics_init(struct psmouse *psmouse)
                goto init_fail;
        }
 
-       if (synaptics_set_absolute_mode(psmouse)) {
-               psmouse_err(psmouse, "Unable to initialize device.\n");
-               goto init_fail;
-       }
+       priv->absolute_mode = absolute_mode;
+       if (SYN_ID_DISGEST_SUPPORTED(priv->identity))
+               priv->disable_gesture = true;
 
-       if (synaptics_set_advanced_gesture_mode(psmouse)) {
-               psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+       if (synaptics_set_mode(psmouse)) {
+               psmouse_err(psmouse, "Unable to initialize device.\n");
                goto init_fail;
        }
 
@@ -1393,12 +1452,19 @@ int synaptics_init(struct psmouse *psmouse)
        psmouse->model = ((priv->model_id & 0x00ff0000) >> 8) |
                          (priv->model_id & 0x000000ff);
 
-       psmouse->protocol_handler = synaptics_process_byte;
+       if (absolute_mode) {
+               psmouse->protocol_handler = synaptics_process_byte;
+               psmouse->pktsize = 6;
+       } else {
+               /* Relative mode follows standard PS/2 mouse protocol */
+               psmouse->protocol_handler = psmouse_process_byte;
+               psmouse->pktsize = 3;
+       }
+
        psmouse->set_rate = synaptics_set_rate;
        psmouse->disconnect = synaptics_disconnect;
        psmouse->reconnect = synaptics_reconnect;
        psmouse->cleanup = synaptics_reset;
-       psmouse->pktsize = 6;
        /* Synaptics can usually stay in sync without extra help */
        psmouse->resync_time = 0;
 
@@ -1417,11 +1483,32 @@ int synaptics_init(struct psmouse *psmouse)
                psmouse->rate = 40;
        }
 
+       if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity)) {
+               err = device_create_file(&psmouse->ps2dev.serio->dev,
+                                        &psmouse_attr_disable_gesture.dattr);
+               if (err) {
+                       psmouse_err(psmouse,
+                                   "Failed to create disable_gesture attribute (%d)",
+                                   err);
+                       goto init_fail;
+               }
+       }
+
        return 0;
 
  init_fail:
        kfree(priv);
-       return -1;
+       return err;
+}
+
+int synaptics_init(struct psmouse *psmouse)
+{
+       return __synaptics_init(psmouse, true);
+}
+
+int synaptics_init_relative(struct psmouse *psmouse)
+{
+       return __synaptics_init(psmouse, false);
 }
 
 bool synaptics_supported(void)
index 622aea8dd7e09de66a1e573b081ece89f828e632..fd26ccca13d793b225654b21de12a21f6a306ad5 100644 (file)
 #define SYN_ID_MINOR(i)                        (((i) >> 16) & 0xff)
 #define SYN_ID_FULL(i)                 ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
 #define SYN_ID_IS_SYNAPTICS(i)         ((((i) >> 8) & 0xff) == 0x47)
+#define SYN_ID_DISGEST_SUPPORTED(i)    (SYN_ID_MAJOR(i) >= 4)
 
 /* synaptics special commands */
 #define SYN_PS_SET_MODE2               0x14
@@ -159,6 +160,9 @@ struct synaptics_data {
        unsigned char mode;                     /* current mode byte */
        int scroll;
 
+       bool absolute_mode;                     /* run in Absolute mode */
+       bool disable_gesture;                   /* disable gestures */
+
        struct serio *pt_port;                  /* Pass-through serio port */
 
        struct synaptics_mt_state mt_state;     /* Current mt finger state */
@@ -175,6 +179,7 @@ struct synaptics_data {
 void synaptics_module_init(void);
 int synaptics_detect(struct psmouse *psmouse, bool set_properties);
 int synaptics_init(struct psmouse *psmouse);
+int synaptics_init_relative(struct psmouse *psmouse);
 void synaptics_reset(struct psmouse *psmouse);
 bool synaptics_supported(void);
 
index 54b2fa892e1941b76d3d863013b85a4884595770..22b218018137d964fdae5987addd66a1b7272320 100644 (file)
@@ -89,10 +89,12 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
        unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned long value;
+       unsigned char value;
+       int err;
 
-       if (strict_strtoul(buf, 10, &value) || value > 255)
-               return -EINVAL;
+       err = kstrtou8(buf, 10, &value);
+       if (err)
+               return err;
 
        *field = value;
        trackpoint_write(&psmouse->ps2dev, attr->command, value);
@@ -115,9 +117,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
        unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned long value;
+       unsigned int value;
+       int err;
+
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
 
-       if (strict_strtoul(buf, 10, &value) || value > 1)
+       if (value > 1)
                return -EINVAL;
 
        if (attr->inverted)
index d363dc4571a381e47efc8d1e912e0c5eca8177a1..35864c6130bb0b5723eed9f2fcc8408868f36bef 100644 (file)
@@ -196,18 +196,7 @@ static struct platform_driver altera_ps2_driver = {
                .of_match_table = altera_ps2_match,
        },
 };
-
-static int __init altera_ps2_init(void)
-{
-       return platform_driver_register(&altera_ps2_driver);
-}
-module_init(altera_ps2_init);
-
-static void __exit altera_ps2_exit(void)
-{
-       platform_driver_unregister(&altera_ps2_driver);
-}
-module_exit(altera_ps2_exit);
+module_platform_driver(altera_ps2_driver);
 
 MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
 MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
index 95280f9207e14cf4d552aaa7cf8b7b3704fc123f..421a7442e4641f92e73aa231cefe1de805a87fd2 100644 (file)
@@ -358,19 +358,7 @@ static struct platform_driver psif_driver = {
        .suspend        = psif_suspend,
        .resume         = psif_resume,
 };
-
-static int __init psif_init(void)
-{
-       return platform_driver_probe(&psif_driver, psif_probe);
-}
-
-static void __exit psif_exit(void)
-{
-       platform_driver_unregister(&psif_driver);
-}
-
-module_init(psif_init);
-module_exit(psif_exit);
+module_platform_driver(psif_driver);
 
 MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
index d37a48e099d0d0c63441f28c6da2a77488797cfd..86564414b75a4535c0f2745bbf2ea812cb5d5069 100644 (file)
@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
  * Reset the controller and reset CRT to the original value set by BIOS.
  */
 
-static void i8042_controller_reset(void)
+static void i8042_controller_reset(bool force_reset)
 {
        i8042_flush();
 
@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
  * Reset the controller if requested.
  */
 
-       if (i8042_reset)
+       if (i8042_reset || force_reset)
                i8042_controller_selftest();
 
 /*
@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
  * upsetting it.
  */
 
-static int i8042_pm_reset(struct device *dev)
+static int i8042_pm_suspend(struct device *dev)
 {
-       i8042_controller_reset();
+       i8042_controller_reset(true);
 
        return 0;
 }
@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
        return 0;
 }
 
+static int i8042_pm_reset(struct device *dev)
+{
+       i8042_controller_reset(false);
+
+       return 0;
+}
+
 static int i8042_pm_restore(struct device *dev)
 {
        return i8042_controller_resume(false);
 }
 
 static const struct dev_pm_ops i8042_pm_ops = {
-       .suspend        = i8042_pm_reset,
+       .suspend        = i8042_pm_suspend,
        .resume         = i8042_pm_resume,
        .thaw           = i8042_pm_thaw,
        .poweroff       = i8042_pm_reset,
@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
 
 static void i8042_shutdown(struct platform_device *dev)
 {
-       i8042_controller_reset();
+       i8042_controller_reset(false);
 }
 
 static int __init i8042_create_kbd_port(void)
@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
  out_fail:
        i8042_free_aux_ports(); /* in case KBD failed but AUX not */
        i8042_free_irqs();
-       i8042_controller_reset();
+       i8042_controller_reset(false);
        i8042_platform_device = NULL;
 
        return error;
@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
 {
        i8042_unregister_ports();
        i8042_free_irqs();
-       i8042_controller_reset();
+       i8042_controller_reset(false);
        i8042_platform_device = NULL;
 
        return 0;
index 7ec3c97dc1b9b5f2198af4338db7c48ca7ec9abd..8b44ddc8041ce4c1cf410435585caeb8f379193a 100644 (file)
@@ -143,16 +143,4 @@ static struct platform_driver rpckbd_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init rpckbd_init(void)
-{
-       return platform_driver_register(&rpckbd_driver);
-}
-
-static void __exit rpckbd_exit(void)
-{
-       platform_driver_unregister(&rpckbd_driver);
-}
-
-module_init(rpckbd_init);
-module_exit(rpckbd_exit);
+module_platform_driver(rpckbd_driver);
index d64c5a43aaad238671f3958e9d0bd6e08da5010f..d96d4c2a76a94793270c6e4ce6dda247b2e08ebe 100644 (file)
@@ -253,7 +253,7 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
        }
 
        /* Get IRQ for the device */
-       if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
+       if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
                dev_err(dev, "no IRQ found\n");
                return -ENODEV;
        }
@@ -369,19 +369,7 @@ static struct platform_driver xps2_of_driver = {
        .probe          = xps2_of_probe,
        .remove         = __devexit_p(xps2_of_remove),
 };
-
-static int __init xps2_init(void)
-{
-       return platform_driver_register(&xps2_of_driver);
-}
-
-static void __exit xps2_cleanup(void)
-{
-       platform_driver_unregister(&xps2_of_driver);
-}
-
-module_init(xps2_init);
-module_exit(xps2_cleanup);
+module_platform_driver(xps2_of_driver);
 
 MODULE_AUTHOR("Xilinx, Inc.");
 MODULE_DESCRIPTION("Xilinx XPS PS/2 driver");
index d5ef3debd0455c8cb792e21e60bde573c37ce069..205d16aab441990283afd4f0068f97b106dcd198 100644 (file)
@@ -1198,9 +1198,9 @@ static ssize_t
 store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
        struct aiptek *aiptek = dev_get_drvdata(dev);
-       long x;
+       int x;
 
-       if (strict_strtol(buf, 10, &x)) {
+       if (kstrtoint(buf, 10, &x)) {
                size_t len = buf[count - 1] == '\n' ? count - 1 : count;
 
                if (strncmp(buf, "disable", len))
@@ -1240,9 +1240,9 @@ static ssize_t
 store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
        struct aiptek *aiptek = dev_get_drvdata(dev);
-       long y;
+       int y;
 
-       if (strict_strtol(buf, 10, &y)) {
+       if (kstrtoint(buf, 10, &y)) {
                size_t len = buf[count - 1] == '\n' ? count - 1 : count;
 
                if (strncmp(buf, "disable", len))
@@ -1277,12 +1277,13 @@ static ssize_t
 store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
        struct aiptek *aiptek = dev_get_drvdata(dev);
-       long j;
+       int err, j;
 
-       if (strict_strtol(buf, 10, &j))
-               return -EINVAL;
+       err = kstrtoint(buf, 10, &j);
+       if (err)
+               return err;
 
-       aiptek->newSetting.jitterDelay = (int)j;
+       aiptek->newSetting.jitterDelay = j;
        return count;
 }
 
@@ -1306,12 +1307,13 @@ static ssize_t
 store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
        struct aiptek *aiptek = dev_get_drvdata(dev);
-       long d;
+       int err, d;
 
-       if (strict_strtol(buf, 10, &d))
-               return -EINVAL;
+       err = kstrtoint(buf, 10, &d);
+       if (err)
+               return err;
 
-       aiptek->newSetting.programmableDelay = (int)d;
+       aiptek->newSetting.programmableDelay = d;
        return count;
 }
 
@@ -1557,11 +1559,13 @@ static ssize_t
 store_tabletWheel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
        struct aiptek *aiptek = dev_get_drvdata(dev);
-       long w;
+       int err, w;
 
-       if (strict_strtol(buf, 10, &w)) return -EINVAL;
+       err = kstrtoint(buf, 10, &w);
+       if (err)
+               return err;
 
-       aiptek->newSetting.wheel = (int)w;
+       aiptek->newSetting.wheel = w;
        return count;
 }
 
index 8f9cde3e0ec21a2e6db96fe6ce998c72aac93035..2a97b7e76db1d33310820502f8e51ae01576af64 100644 (file)
@@ -28,7 +28,9 @@
 #define HID_USAGE_Y_TILT               0x3e
 #define HID_USAGE_FINGER               0x22
 #define HID_USAGE_STYLUS               0x20
-#define HID_COLLECTION                 0xc0
+#define HID_COLLECTION                 0xa1
+#define HID_COLLECTION_LOGICAL         0x02
+#define HID_COLLECTION_END             0xc0
 
 enum {
        WCM_UNDEFINED = 0,
@@ -66,7 +68,8 @@ static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
        do {
                retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
                                USB_REQ_GET_REPORT,
-                               USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                               USB_DIR_IN | USB_TYPE_CLASS |
+                               USB_RECIP_INTERFACE,
                                (type << 8) + id,
                                intf->altsetting[0].desc.bInterfaceNumber,
                                buf, size, 100);
@@ -164,7 +167,70 @@ static void wacom_close(struct input_dev *dev)
                usb_autopm_put_interface(wacom->intf);
 }
 
-static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
+static int wacom_parse_logical_collection(unsigned char *report,
+                                         struct wacom_features *features)
+{
+       int length = 0;
+
+       if (features->type == BAMBOO_PT) {
+
+               /* Logical collection is only used by 3rd gen Bamboo Touch */
+               features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+               features->device_type = BTN_TOOL_DOUBLETAP;
+
+               /*
+                * Stylus and Touch have same active area
+                * so compute physical size based on stylus
+                * data before its overwritten.
+                */
+               features->x_phy =
+                       (features->x_max * features->x_resolution) / 100;
+               features->y_phy =
+                       (features->y_max * features->y_resolution) / 100;
+
+               features->x_max = features->y_max =
+                       get_unaligned_le16(&report[10]);
+
+               length = 11;
+       }
+       return length;
+}
+
+/*
+ * Interface Descriptor of wacom devices can be incomplete and
+ * inconsistent so wacom_features table is used to store stylus
+ * device's packet lengths, various maximum values, and tablet
+ * resolution based on product ID's.
+ *
+ * For devices that contain 2 interfaces, wacom_features table is
+ * inaccurate for the touch interface.  Since the Interface Descriptor
+ * for touch interfaces has pretty complete data, this function exists
+ * to query tablet for this missing information instead of hard coding in
+ * an additional table.
+ *
+ * A typical Interface Descriptor for a stylus will contain a
+ * boot mouse application collection that is not of interest and this
+ * function will ignore it.
+ *
+ * It also contains a digitizer application collection that also is not
+ * of interest since any information it contains would be duplicate
+ * of what is in wacom_features. Usually it defines a report of an array
+ * of bytes that could be used as max length of the stylus packet returned.
+ * If it happens to define a Digitizer-Stylus Physical Collection then
+ * the X and Y logical values contain valid data but it is ignored.
+ *
+ * A typical Interface Descriptor for a touch interface will contain a
+ * Digitizer-Finger Physical Collection which will define both logical
+ * X/Y maximum as well as the physical size of tablet. Since touch
+ * interfaces haven't supported pressure or distance, this is enough
+ * information to override invalid values in the wacom_features table.
+ *
+ * 3rd gen Bamboo Touch no longer define a Digitizer-Finger Pysical
+ * Collection. Instead they define a Logical Collection with a single
+ * Logical Maximum for both X and Y.
+ */
+static int wacom_parse_hid(struct usb_interface *intf,
+                          struct hid_descriptor *hid_desc,
                           struct wacom_features *features)
 {
        struct usb_device *dev = interface_to_usbdev(intf);
@@ -244,8 +310,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                                /* penabled only accepts exact bytes of data */
                                                if (features->type == TABLETPC2FG)
                                                        features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                               if (features->type == BAMBOO_PT)
-                                                       features->pktlen = WACOM_PKGLEN_BBFUN;
                                                features->device_type = BTN_TOOL_PEN;
                                                features->x_max =
                                                        get_unaligned_le16(&report[i + 3]);
@@ -287,8 +351,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                                /* penabled only accepts exact bytes of data */
                                                if (features->type == TABLETPC2FG)
                                                        features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                               if (features->type == BAMBOO_PT)
-                                                       features->pktlen = WACOM_PKGLEN_BBFUN;
                                                features->device_type = BTN_TOOL_PEN;
                                                features->y_max =
                                                        get_unaligned_le16(&report[i + 3]);
@@ -302,6 +364,11 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                                i++;
                                break;
 
+                       /*
+                        * Requiring Stylus Usage will ignore boot mouse
+                        * X/Y values and some cases of invalid Digitizer X/Y
+                        * values commonly reported.
+                        */
                        case HID_USAGE_STYLUS:
                                pen = 1;
                                i++;
@@ -309,10 +376,20 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
                        }
                        break;
 
-               case HID_COLLECTION:
+               case HID_COLLECTION_END:
                        /* reset UsagePage and Finger */
                        finger = usage = 0;
                        break;
+
+               case HID_COLLECTION:
+                       i++;
+                       switch (report[i]) {
+                       case HID_COLLECTION_LOGICAL:
+                               i += wacom_parse_logical_collection(&report[i],
+                                                                   features);
+                               break;
+                       }
+                       break;
                }
        }
 
@@ -348,7 +425,8 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
                                                WAC_HID_FEATURE_REPORT,
                                                report_id, rep_data, 4, 1);
                } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
-       } else if (features->type != TABLETPC) {
+       } else if (features->type != TABLETPC &&
+                  features->device_type == BTN_TOOL_PEN) {
                do {
                        rep_data[0] = 2;
                        rep_data[1] = 2;
@@ -485,7 +563,8 @@ static int wacom_led_control(struct wacom *wacom)
        if (!buf)
                return -ENOMEM;
 
-       if (wacom->wacom_wac.features.type == WACOM_21UX2)
+       if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+           wacom->wacom_wac.features.type == WACOM_24HD)
                led = (wacom->led.select[1] << 4) | 0x40;
 
        led |=  wacom->led.select[0] | 0x4;
@@ -704,6 +783,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
                                           &intuos4_led_attr_group);
                break;
 
+       case WACOM_24HD:
        case WACOM_21UX2:
                wacom->led.select[0] = 0;
                wacom->led.select[1] = 0;
@@ -738,6 +818,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
                                   &intuos4_led_attr_group);
                break;
 
+       case WACOM_24HD:
        case WACOM_21UX2:
                sysfs_remove_group(&wacom->intf->dev.kobj,
                                   &cintiq_led_attr_group);
index 2ee47d01a3b4ecde112b07ac95ccd798f053e4f4..88672ec296c116e10d7340c7f07397e10e09afe1 100644 (file)
@@ -452,7 +452,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
        if ((data[1] & 0xb8) == 0xa0) {
                t = (data[6] << 2) | ((data[7] >> 6) & 3);
                if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
-                   features->type == WACOM_21UX2) {
+                   features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
                        t = (t << 1) | (data[1] & 1);
                }
                input_report_abs(input, ABS_PRESSURE, t);
@@ -519,6 +519,56 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                                input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
+               } else if (features->type == WACOM_24HD) {
+                       input_report_key(input, BTN_0, (data[6] & 0x01));
+                       input_report_key(input, BTN_1, (data[6] & 0x02));
+                       input_report_key(input, BTN_2, (data[6] & 0x04));
+                       input_report_key(input, BTN_3, (data[6] & 0x08));
+                       input_report_key(input, BTN_4, (data[6] & 0x10));
+                       input_report_key(input, BTN_5, (data[6] & 0x20));
+                       input_report_key(input, BTN_6, (data[6] & 0x40));
+                       input_report_key(input, BTN_7, (data[6] & 0x80));
+                       input_report_key(input, BTN_8, (data[8] & 0x01));
+                       input_report_key(input, BTN_9, (data[8] & 0x02));
+                       input_report_key(input, BTN_A, (data[8] & 0x04));
+                       input_report_key(input, BTN_B, (data[8] & 0x08));
+                       input_report_key(input, BTN_C, (data[8] & 0x10));
+                       input_report_key(input, BTN_X, (data[8] & 0x20));
+                       input_report_key(input, BTN_Y, (data[8] & 0x40));
+                       input_report_key(input, BTN_Z, (data[8] & 0x80));
+
+                       /*
+                        * Three "buttons" are available on the 24HD which are
+                        * physically implemented as a touchstrip. Each button
+                        * is approximately 3 bits wide with a 2 bit spacing.
+                        * The raw touchstrip bits are stored at:
+                        *    ((data[3] & 0x1f) << 8) | data[4])
+                        */
+                       input_report_key(input, KEY_PROG1, data[4] & 0x07);
+                       input_report_key(input, KEY_PROG2, data[4] & 0xE0);
+                       input_report_key(input, KEY_PROG3, data[3] & 0x1C);
+
+                       if (data[1] & 0x80) {
+                               input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
+                       } else {
+                               /* Out of proximity, clear wheel value. */
+                               input_report_abs(input, ABS_WHEEL, 0);
+                       }
+
+                       if (data[2] & 0x80) {
+                               input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
+                       } else {
+                               /* Out of proximity, clear second wheel value. */
+                               input_report_abs(input, ABS_THROTTLE, 0);
+                       }
+
+                       if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
+                               input_report_key(input, wacom->tool[1], 1);
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_key(input, wacom->tool[1], 0);
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else {
                        if (features->type == WACOM_21UX2) {
                                input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -799,6 +849,9 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        unsigned char *data = wacom->data;
        int i;
 
+       if (data[0] != 0x02)
+           return 0;
+
        for (i = 0; i < 2; i++) {
                int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
                bool touch = data[offset + 3] & 0x80;
@@ -837,18 +890,77 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        return 0;
 }
 
+static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+       struct input_dev *input = wacom->input;
+       int slot_id = data[0] - 2;  /* data[0] is between 2 and 17 */
+       bool touch = data[1] & 0x80;
+
+       touch = touch && !wacom->shared->stylus_in_proximity;
+
+       input_mt_slot(input, slot_id);
+       input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+       if (touch) {
+               int x = (data[2] << 4) | (data[4] >> 4);
+               int y = (data[3] << 4) | (data[4] & 0x0f);
+               int w = data[6];
+
+               input_report_abs(input, ABS_MT_POSITION_X, x);
+               input_report_abs(input, ABS_MT_POSITION_Y, y);
+               input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+       }
+}
+
+static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+       struct input_dev *input = wacom->input;
+
+       input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+       input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
+       input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+       input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
+}
+
+static int wacom_bpt3_touch(struct wacom_wac *wacom)
+{
+       struct input_dev *input = wacom->input;
+       unsigned char *data = wacom->data;
+       int count = data[1] & 0x03;
+       int i;
+
+       if (data[0] != 0x02)
+           return 0;
+
+       /* data has up to 7 fixed sized 8-byte messages starting at data[2] */
+       for (i = 0; i < count; i++) {
+               int offset = (8 * i) + 2;
+               int msg_id = data[offset];
+
+               if (msg_id >= 2 && msg_id <= 17)
+                       wacom_bpt3_touch_msg(wacom, data + offset);
+               else if (msg_id == 128)
+                       wacom_bpt3_button_msg(wacom, data + offset);
+
+       }
+
+       input_mt_report_pointer_emulation(input, true);
+
+       input_sync(input);
+
+       return 0;
+}
+
 static int wacom_bpt_pen(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
        unsigned char *data = wacom->data;
        int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
 
-       /*
-        * Similar to Graphire protocol, data[1] & 0x20 is proximity and
-        * data[1] & 0x18 is tool ID.  0x30 is safety check to ignore
-        * 2 unused tool ID's.
-        */
-       prox = (data[1] & 0x30) == 0x30;
+       if (data[0] != 0x02)
+           return 0;
+
+       prox = (data[1] & 0x20) == 0x20;
 
        /*
         * All reports shared between PEN and RUBBER tool must be
@@ -912,7 +1024,9 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
 {
        if (len == WACOM_PKGLEN_BBTOUCH)
                return wacom_bpt_touch(wacom);
-       else if (len == WACOM_PKGLEN_BBFUN)
+       else if (len == WACOM_PKGLEN_BBTOUCH3)
+               return wacom_bpt3_touch(wacom);
+       else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN)
                return wacom_bpt_pen(wacom);
 
        return 0;
@@ -955,6 +1069,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
        case CINTIQ:
        case WACOM_BEE:
        case WACOM_21UX2:
+       case WACOM_24HD:
                sync = wacom_intuos_irq(wacom_wac);
                break;
 
@@ -1031,9 +1146,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
            features->type == BAMBOO_PT)
                features->quirks |= WACOM_QUIRK_MULTI_INPUT;
 
-       /* quirks for bamboo touch */
+       /* quirk for bamboo touch with 2 low res touches */
        if (features->type == BAMBOO_PT &&
-           features->device_type == BTN_TOOL_DOUBLETAP) {
+           features->pktlen == WACOM_PKGLEN_BBTOUCH) {
                features->x_max <<= 5;
                features->y_max <<= 5;
                features->x_fuzz <<= 5;
@@ -1110,6 +1225,26 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
+       case WACOM_24HD:
+               __set_bit(BTN_A, input_dev->keybit);
+               __set_bit(BTN_B, input_dev->keybit);
+               __set_bit(BTN_C, input_dev->keybit);
+               __set_bit(BTN_X, input_dev->keybit);
+               __set_bit(BTN_Y, input_dev->keybit);
+               __set_bit(BTN_Z, input_dev->keybit);
+
+               for (i = 0; i < 10; i++)
+                       __set_bit(BTN_0 + i, input_dev->keybit);
+
+               __set_bit(KEY_PROG1, input_dev->keybit);
+               __set_bit(KEY_PROG2, input_dev->keybit);
+               __set_bit(KEY_PROG3, input_dev->keybit);
+
+               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+               input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+               wacom_setup_cintiq(wacom_wac);
+               break;
+
        case WACOM_21UX2:
                __set_bit(BTN_A, input_dev->keybit);
                __set_bit(BTN_B, input_dev->keybit);
@@ -1240,7 +1375,21 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
                        __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
                        __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
 
-                       input_mt_init_slots(input_dev, 2);
+                       if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+                               __set_bit(BTN_TOOL_TRIPLETAP,
+                                         input_dev->keybit);
+                               __set_bit(BTN_TOOL_QUADTAP,
+                                         input_dev->keybit);
+
+                               input_mt_init_slots(input_dev, 16);
+
+                               input_set_abs_params(input_dev,
+                                                    ABS_MT_TOUCH_MAJOR,
+                                                    0, 255, 0, 0);
+                       } else {
+                               input_mt_init_slots(input_dev, 2);
+                       }
+
                        input_set_abs_params(input_dev, ABS_MT_POSITION_X,
                                             0, features->x_max,
                                             features->x_fuzz, 0);
@@ -1425,6 +1574,9 @@ static const struct wacom_features wacom_features_0xBB =
 static const struct wacom_features wacom_features_0xBC =
        { "Wacom Intuos4 WL",     WACOM_PKGLEN_INTUOS,    40840, 25400, 2047,
          63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF4 =
+       { "Wacom Cintiq 24HD",    WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
+         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x3F =
        { "Wacom Cintiq 21UX",    WACOM_PKGLEN_INTUOS,    87200, 65600, 1023,
          63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1509,6 +1661,15 @@ static const struct wacom_features wacom_features_0xDA =
 static struct wacom_features wacom_features_0xDB =
        { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN,  21648, 13700, 1023,
          31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDD =
+        { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN,     14720,  9200, 1023,
+          31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDE =
+        { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN,    14720,  9200, 1023,
+          31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDF =
+        { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN,    21648, 13700, 1023,
+          31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x6004 =
        { "ISD-V4",               WACOM_PKGLEN_GRAPHIRE,  12800,  8000,  255,
          0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1604,6 +1765,9 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xD8) },
        { USB_DEVICE_WACOM(0xDA) },
        { USB_DEVICE_WACOM(0xDB) },
+       { USB_DEVICE_WACOM(0xDD) },
+       { USB_DEVICE_WACOM(0xDE) },
+       { USB_DEVICE_WACOM(0xDF) },
        { USB_DEVICE_WACOM(0xF0) },
        { USB_DEVICE_WACOM(0xCC) },
        { USB_DEVICE_WACOM(0x90) },
@@ -1616,6 +1780,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xE6) },
        { USB_DEVICE_WACOM(0xEC) },
        { USB_DEVICE_WACOM(0x47) },
+       { USB_DEVICE_WACOM(0xF4) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
 };
index 53eb71b6833099ad0a23aed0a3943553231794f1..050acaefee7d3763f543f70ac5f551f41d5bde30 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/types.h>
 
 /* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX       32
+#define WACOM_PKGLEN_MAX       64
 
 /* packet length for individual models */
 #define WACOM_PKGLEN_PENPRTN    7
@@ -22,6 +22,8 @@
 #define WACOM_PKGLEN_TPC1FG     5
 #define WACOM_PKGLEN_TPC2FG    14
 #define WACOM_PKGLEN_BBTOUCH   20
+#define WACOM_PKGLEN_BBTOUCH3  64
+#define WACOM_PKGLEN_BBPEN     10
 
 /* device IDs */
 #define STYLUS_DEVICE_ID       0x02
@@ -57,6 +59,7 @@ enum {
        INTUOS4S,
        INTUOS4,
        INTUOS4L,
+       WACOM_24HD,
        WACOM_21UX2,
        CINTIQ,
        WACOM_BEE,
index b3aebc2166ba666256fd083da1f5c733545530fc..05f30b73c3c385efc2906185877ecee28e30bc9f 100644 (file)
@@ -217,18 +217,7 @@ static struct platform_driver pm860x_touch_driver = {
        .probe  = pm860x_touch_probe,
        .remove = __devexit_p(pm860x_touch_remove),
 };
-
-static int __init pm860x_touch_init(void)
-{
-       return platform_driver_register(&pm860x_touch_driver);
-}
-module_init(pm860x_touch_init);
-
-static void __exit pm860x_touch_exit(void)
-{
-       platform_driver_unregister(&pm860x_touch_driver);
-}
-module_exit(pm860x_touch_exit);
+module_platform_driver(pm860x_touch_driver);
 
 MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 3488ffe1fa0a16bc7b5c5a53802607a11fb340fb..4af2a18eb3ba9013332de2c961a3076fe28e1d22 100644 (file)
@@ -98,6 +98,19 @@ config TOUCHSCREEN_ATMEL_MXT
          To compile this driver as a module, choose M here: the
          module will be called atmel_mxt_ts.
 
+config TOUCHSCREEN_AUO_PIXCIR
+       tristate "AUO in-cell touchscreen using Pixcir ICs"
+       depends on I2C
+       depends on GPIOLIB
+       help
+         Say Y here if you have a AUO display with in-cell touchscreen
+         using Pixcir ICs.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called auo-pixcir-ts.
+
 config TOUCHSCREEN_BITSY
        tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
        depends on SA1100_BITSY
@@ -177,6 +190,16 @@ config TOUCHSCREEN_EETI
          To compile this driver as a module, choose M here: the
          module will be called eeti_ts.
 
+config TOUCHSCREEN_EGALAX
+       tristate "EETI eGalax multi-touch panel support"
+       depends on I2C
+       help
+         Say Y here to enable support for I2C connected EETI
+         eGalax multi-touch panels.
+
+         To compile this driver as a module, choose M here: the
+         module will be called egalax_ts.
+
 config TOUCHSCREEN_FUJITSU
        tristate "Fujitsu serial touchscreen"
        select SERIO
@@ -435,6 +458,18 @@ config TOUCHSCREEN_UCB1400
          To compile this driver as a module, choose M here: the
          module will be called ucb1400_ts.
 
+config TOUCHSCREEN_PIXCIR
+       tristate "PIXCIR I2C touchscreens"
+       depends on I2C
+       help
+         Say Y here if you have a pixcir i2c touchscreen
+         controller.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called pixcir_i2c_ts.
+
 config TOUCHSCREEN_WM831X
        tristate "Support for WM831x touchscreen controllers"
        depends on MFD_WM831X
@@ -541,6 +576,7 @@ config TOUCHSCREEN_USB_COMPOSITE
          - GoTop Super_Q2/GogoPen/PenPower tablets
          - JASTEC USB Touch Controller/DigiTech DTR-02U
          - Zytronic controllers
+         - Elo TouchSystems 2700 IntelliTouch
 
          Have a look at <http://linux.chapter7.ch/touchkit/> for
          a usage description and the required user-space stuff.
@@ -620,6 +656,11 @@ config TOUCHSCREEN_USB_JASTEC
        bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
        depends on TOUCHSCREEN_USB_COMPOSITE
 
+config TOUCHSCREEN_USB_ELO
+       default y
+       bool "Elo TouchSystems 2700 IntelliTouch controller device support" if EXPERT
+       depends on TOUCHSCREEN_USB_COMPOSITE
+
 config TOUCHSCREEN_USB_E2I
        default y
        bool "e2i Touchscreen controller (e.g. from Mimo 740)"
index f957676035a4e6fb5b17c2b4847a9b5d602d2784..496091e884603428da438e7d0cd9d72b27c6f497 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI)  += ad7879-spi.o
 obj-$(CONFIG_TOUCHSCREEN_ADS7846)      += ads7846.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT)    += atmel_mxt_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR)   += auo-pixcir-ts.o
 obj-$(CONFIG_TOUCHSCREEN_BITSY)                += h3600_ts_input.o
 obj-$(CONFIG_TOUCHSCREEN_BU21013)       += bu21013_ts.o
 obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110)   += cy8ctmg110_ts.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE)   += hampshire.o
 obj-$(CONFIG_TOUCHSCREEN_GUNZE)                += gunze.o
 obj-$(CONFIG_TOUCHSCREEN_EETI)         += eeti_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ELO)          += elo.o
+obj-$(CONFIG_TOUCHSCREEN_EGALAX)       += egalax_ts.o
 obj-$(CONFIG_TOUCHSCREEN_FUJITSU)      += fujitsu_ts.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)       += inexio.o
 obj-$(CONFIG_TOUCHSCREEN_INTEL_MID)    += intel-mid-touch.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN)      += htcpen.o
 obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE)        += usbtouchscreen.o
 obj-$(CONFIG_TOUCHSCREEN_PCAP)         += pcap_ts.o
 obj-$(CONFIG_TOUCHSCREEN_PENMOUNT)     += penmount.o
+obj-$(CONFIG_TOUCHSCREEN_PIXCIR)       += pixcir_i2c_ts.o
 obj-$(CONFIG_TOUCHSCREEN_S3C2410)      += s3c2410_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ST1232)       += st1232.o
 obj-$(CONFIG_TOUCHSCREEN_STMPE)                += stmpe-ts.o
index baa43df6502d098db994073ada5408f2b6601e2c..49a36df0b752929c522e67dbae73383de5273d49 100644 (file)
@@ -488,10 +488,10 @@ static ssize_t ad7877_disable_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ad7877 *ts = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -518,10 +518,10 @@ static ssize_t ad7877_dac_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ad7877 *ts = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -548,10 +548,10 @@ static ssize_t ad7877_gpio3_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ad7877 *ts = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -579,10 +579,10 @@ static ssize_t ad7877_gpio4_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ad7877 *ts = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
@@ -853,7 +853,6 @@ static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
 static struct spi_driver ad7877_driver = {
        .driver = {
                .name   = "ad7877",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
                .pm     = &ad7877_pm,
        },
index c789b974c7958ad7fec36e6f45de9b9b7dc3367d..0dac6712f42b282df0fb4b16441ce4a4c5764b95 100644 (file)
 
 #define AD7879_DEVID           0x79    /* AD7879-1/AD7889-1 */
 
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_i2c_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ad7879 *ts = i2c_get_clientdata(client);
-
-       ad7879_suspend(ts);
-
-       return 0;
-}
-
-static int ad7879_i2c_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ad7879 *ts = i2c_get_clientdata(client);
-
-       ad7879_resume(ts);
-
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
-
 /* All registers are word-sized.
  * AD7879 uses a high-byte first convention.
  */
@@ -47,7 +23,7 @@ static int ad7879_i2c_read(struct device *dev, u8 reg)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
-       return swab16(i2c_smbus_read_word_data(client, reg));
+       return i2c_smbus_read_word_swapped(client, reg);
 }
 
 static int ad7879_i2c_multi_read(struct device *dev,
@@ -68,7 +44,7 @@ static int ad7879_i2c_write(struct device *dev, u8 reg, u16 val)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
-       return i2c_smbus_write_word_data(client, reg, swab16(val));
+       return i2c_smbus_write_word_swapped(client, reg, val);
 }
 
 static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
@@ -119,7 +95,7 @@ static struct i2c_driver ad7879_i2c_driver = {
        .driver = {
                .name   = "ad7879",
                .owner  = THIS_MODULE,
-               .pm     = &ad7879_i2c_pm,
+               .pm     = &ad7879_pm_ops,
        },
        .probe          = ad7879_i2c_probe,
        .remove         = __devexit_p(ad7879_i2c_remove),
@@ -141,4 +117,3 @@ module_exit(ad7879_i2c_exit);
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("AD7879(-1) touchscreen I2C bus driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad7879");
index b1643c8fa7c9a1bca311e90e2cfc39e7ba836e2f..9b2e1c2b197198796992183a90a10927c073a57b 100644 (file)
 #define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
 #define AD7879_READCMD(reg)  (AD7879_CMD(reg) | AD7879_CMD_READ)
 
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_spi_suspend(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct ad7879 *ts = spi_get_drvdata(spi);
-
-       ad7879_suspend(ts);
-
-       return 0;
-}
-
-static int ad7879_spi_resume(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct ad7879 *ts = spi_get_drvdata(spi);
-
-       ad7879_resume(ts);
-
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
-
 /*
  * ad7879_read/write are only used for initial setup and for sysfs controls.
  * The main traffic is done in ad7879_collect().
@@ -174,9 +150,8 @@ static int __devexit ad7879_spi_remove(struct spi_device *spi)
 static struct spi_driver ad7879_spi_driver = {
        .driver = {
                .name   = "ad7879",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
-               .pm     = &ad7879_spi_pm,
+               .pm     = &ad7879_pm_ops,
        },
        .probe          = ad7879_spi_probe,
        .remove         = __devexit_p(ad7879_spi_remove),
index 3b2e9ed2aeec3482c7ec178b238410fc93424ff2..e2482b40da5198fdb1406e135fff827d049e5895 100644 (file)
@@ -281,8 +281,11 @@ static void ad7879_close(struct input_dev* input)
                __ad7879_disable(ts);
 }
 
-void ad7879_suspend(struct ad7879 *ts)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_suspend(struct device *dev)
 {
+       struct ad7879 *ts = dev_get_drvdata(dev);
+
        mutex_lock(&ts->input->mutex);
 
        if (!ts->suspended && !ts->disabled && ts->input->users)
@@ -291,11 +294,14 @@ void ad7879_suspend(struct ad7879 *ts)
        ts->suspended = true;
 
        mutex_unlock(&ts->input->mutex);
+
+       return 0;
 }
-EXPORT_SYMBOL(ad7879_suspend);
 
-void ad7879_resume(struct ad7879 *ts)
+static int ad7879_resume(struct device *dev)
 {
+       struct ad7879 *ts = dev_get_drvdata(dev);
+
        mutex_lock(&ts->input->mutex);
 
        if (ts->suspended && !ts->disabled && ts->input->users)
@@ -304,8 +310,13 @@ void ad7879_resume(struct ad7879 *ts)
        ts->suspended = false;
 
        mutex_unlock(&ts->input->mutex);
+
+       return 0;
 }
-EXPORT_SYMBOL(ad7879_resume);
+#endif
+
+SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
+EXPORT_SYMBOL(ad7879_pm_ops);
 
 static void ad7879_toggle(struct ad7879 *ts, bool disable)
 {
@@ -340,10 +351,10 @@ static ssize_t ad7879_disable_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ad7879 *ts = dev_get_drvdata(dev);
-       unsigned long val;
+       unsigned int val;
        int error;
 
-       error = strict_strtoul(buf, 10, &val);
+       error = kstrtouint(buf, 10, &val);
        if (error)
                return error;
 
index 6b45a27236c72ec640ed0e160a1755a12917daba..6fd13c48d373d5d4c8fa46996278376af11aff17 100644 (file)
@@ -21,8 +21,8 @@ struct ad7879_bus_ops {
        int (*write)(struct device *dev, u8 reg, u16 val);
 };
 
-void ad7879_suspend(struct ad7879 *);
-void ad7879_resume(struct ad7879 *);
+extern const struct dev_pm_ops ad7879_pm_ops;
+
 struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned irq,
                            const struct ad7879_bus_ops *bops);
 void ad7879_remove(struct ad7879 *);
index de31ec6fe9e47005ba94cc9eae222f922bb9c4b9..23fd90185659036681de4685f2347f0d23749c5a 100644 (file)
@@ -602,10 +602,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct ads7846 *ts = dev_get_drvdata(dev);
-       unsigned long i;
+       unsigned int i;
+       int err;
 
-       if (strict_strtoul(buf, 10, &i))
-               return -EINVAL;
+       err = kstrtouint(buf, 10, &i);
+       if (err)
+               return err;
 
        if (i)
                ads7846_disable(ts);
@@ -1424,7 +1426,6 @@ static int __devexit ads7846_remove(struct spi_device *spi)
 static struct spi_driver ads7846_driver = {
        .driver = {
                .name   = "ads7846",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
                .pm     = &ads7846_pm,
        },
index 8034cbb20f74c8d8bcd426aa27b998e8bd8fcf20..d016cb26d12523f6eb24214aa10a7e1f70a286ee 100644 (file)
@@ -429,18 +429,7 @@ static struct platform_driver atmel_wm97xx_driver = {
        .suspend        = atmel_wm97xx_suspend,
        .resume         = atmel_wm97xx_resume,
 };
-
-static int __init atmel_wm97xx_init(void)
-{
-       return platform_driver_probe(&atmel_wm97xx_driver, atmel_wm97xx_probe);
-}
-module_init(atmel_wm97xx_init);
-
-static void __exit atmel_wm97xx_exit(void)
-{
-       platform_driver_unregister(&atmel_wm97xx_driver);
-}
-module_exit(atmel_wm97xx_exit);
+module_platform_driver(atmel_wm97xx_driver);
 
 MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32");
index 122a878836598b94320aa2262522a056a86c3620..201b2d2ec1b3bbb63f47d4c4d207c842532f6db2 100644 (file)
@@ -351,20 +351,7 @@ static struct platform_driver atmel_tsadcc_driver = {
                .name   = "atmel_tsadcc",
        },
 };
-
-static int __init atmel_tsadcc_init(void)
-{
-       return platform_driver_register(&atmel_tsadcc_driver);
-}
-
-static void __exit atmel_tsadcc_exit(void)
-{
-       platform_driver_unregister(&atmel_tsadcc_driver);
-}
-
-module_init(atmel_tsadcc_init);
-module_exit(atmel_tsadcc_exit);
-
+module_platform_driver(atmel_tsadcc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Atmel TouchScreen Driver");
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
new file mode 100644 (file)
index 0000000..94fb9fb
--- /dev/null
@@ -0,0 +1,652 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * loosely based on auo_touch.c from Dell Streak vendor-kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input/auo-pixcir-ts.h>
+
+/*
+ * Coordinate calculation:
+ * X1 = X1_LSB + X1_MSB*256
+ * Y1 = Y1_LSB + Y1_MSB*256
+ * X2 = X2_LSB + X2_MSB*256
+ * Y2 = Y2_LSB + Y2_MSB*256
+ */
+#define AUO_PIXCIR_REG_X1_LSB          0x00
+#define AUO_PIXCIR_REG_X1_MSB          0x01
+#define AUO_PIXCIR_REG_Y1_LSB          0x02
+#define AUO_PIXCIR_REG_Y1_MSB          0x03
+#define AUO_PIXCIR_REG_X2_LSB          0x04
+#define AUO_PIXCIR_REG_X2_MSB          0x05
+#define AUO_PIXCIR_REG_Y2_LSB          0x06
+#define AUO_PIXCIR_REG_Y2_MSB          0x07
+
+#define AUO_PIXCIR_REG_STRENGTH                0x0d
+#define AUO_PIXCIR_REG_STRENGTH_X1_LSB 0x0e
+#define AUO_PIXCIR_REG_STRENGTH_X1_MSB 0x0f
+
+#define AUO_PIXCIR_REG_RAW_DATA_X      0x2b
+#define AUO_PIXCIR_REG_RAW_DATA_Y      0x4f
+
+#define AUO_PIXCIR_REG_X_SENSITIVITY   0x6f
+#define AUO_PIXCIR_REG_Y_SENSITIVITY   0x70
+#define AUO_PIXCIR_REG_INT_SETTING     0x71
+#define AUO_PIXCIR_REG_INT_WIDTH       0x72
+#define AUO_PIXCIR_REG_POWER_MODE      0x73
+
+#define AUO_PIXCIR_REG_VERSION         0x77
+#define AUO_PIXCIR_REG_CALIBRATE       0x78
+
+#define AUO_PIXCIR_REG_TOUCHAREA_X1    0x1e
+#define AUO_PIXCIR_REG_TOUCHAREA_Y1    0x1f
+#define AUO_PIXCIR_REG_TOUCHAREA_X2    0x20
+#define AUO_PIXCIR_REG_TOUCHAREA_Y2    0x21
+
+#define AUO_PIXCIR_REG_EEPROM_CALIB_X  0x42
+#define AUO_PIXCIR_REG_EEPROM_CALIB_Y  0xad
+
+#define AUO_PIXCIR_INT_TPNUM_MASK      0xe0
+#define AUO_PIXCIR_INT_TPNUM_SHIFT     5
+#define AUO_PIXCIR_INT_RELEASE         (1 << 4)
+#define AUO_PIXCIR_INT_ENABLE          (1 << 3)
+#define AUO_PIXCIR_INT_POL_HIGH                (1 << 2)
+#define AUO_PIXCIR_INT_MODE_MASK       0x03
+
+/*
+ * Power modes:
+ * active:     scan speed 60Hz
+ * sleep:      scan speed 10Hz can be auto-activated, wakeup on 1st touch
+ * deep sleep: scan speed 1Hz can only be entered or left manually.
+ */
+#define AUO_PIXCIR_POWER_ACTIVE                0x00
+#define AUO_PIXCIR_POWER_SLEEP         0x01
+#define AUO_PIXCIR_POWER_DEEP_SLEEP    0x02
+#define AUO_PIXCIR_POWER_MASK          0x03
+
+#define AUO_PIXCIR_POWER_ALLOW_SLEEP   (1 << 2)
+#define AUO_PIXCIR_POWER_IDLE_TIME(ms) ((ms & 0xf) << 4)
+
+#define AUO_PIXCIR_CALIBRATE           0x03
+
+#define AUO_PIXCIR_EEPROM_CALIB_X_LEN  62
+#define AUO_PIXCIR_EEPROM_CALIB_Y_LEN  36
+
+#define AUO_PIXCIR_RAW_DATA_X_LEN      18
+#define AUO_PIXCIR_RAW_DATA_Y_LEN      11
+
+#define AUO_PIXCIR_STRENGTH_ENABLE     (1 << 0)
+
+/* Touchscreen absolute values */
+#define AUO_PIXCIR_REPORT_POINTS       2
+#define AUO_PIXCIR_MAX_AREA            0xff
+#define AUO_PIXCIR_PENUP_TIMEOUT_MS    10
+
+struct auo_pixcir_ts {
+       struct i2c_client       *client;
+       struct input_dev        *input;
+       char                    phys[32];
+
+       /* special handling for touch_indicate interupt mode */
+       bool                    touch_ind_mode;
+
+       wait_queue_head_t       wait;
+       bool                    stopped;
+};
+
+struct auo_point_t {
+       int     coord_x;
+       int     coord_y;
+       int     area_major;
+       int     area_minor;
+       int     orientation;
+};
+
+static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
+                                  struct auo_point_t *point)
+{
+       struct i2c_client *client = ts->client;
+       const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+       uint8_t raw_coord[8];
+       uint8_t raw_area[4];
+       int i, ret;
+
+       /* touch coordinates */
+       ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_X1_LSB,
+                                           8, raw_coord);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to read coordinate, %d\n", ret);
+               return ret;
+       }
+
+       /* touch area */
+       ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_TOUCHAREA_X1,
+                                           4, raw_area);
+       if (ret < 0) {
+               dev_err(&client->dev, "could not read touch area, %d\n", ret);
+               return ret;
+       }
+
+       for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+               point[i].coord_x =
+                       raw_coord[4 * i + 1] << 8 | raw_coord[4 * i];
+               point[i].coord_y =
+                       raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
+
+               if (point[i].coord_x > pdata->x_max ||
+                   point[i].coord_y > pdata->y_max) {
+                       dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+                               point[i].coord_x, point[i].coord_y);
+                       point[i].coord_x = point[i].coord_y = 0;
+               }
+
+               /* determine touch major, minor and orientation */
+               point[i].area_major = max(raw_area[2 * i], raw_area[2 * i + 1]);
+               point[i].area_minor = min(raw_area[2 * i], raw_area[2 * i + 1]);
+               point[i].orientation = raw_area[2 * i] > raw_area[2 * i + 1];
+       }
+
+       return 0;
+}
+
+static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
+{
+       struct auo_pixcir_ts *ts = dev_id;
+       struct i2c_client *client = ts->client;
+       const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+       struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
+       int i;
+       int ret;
+       int fingers = 0;
+       int abs = -1;
+
+       while (!ts->stopped) {
+
+               /* check for up event in touch touch_ind_mode */
+               if (ts->touch_ind_mode) {
+                       if (gpio_get_value(pdata->gpio_int) == 0) {
+                               input_mt_sync(ts->input);
+                               input_report_key(ts->input, BTN_TOUCH, 0);
+                               input_sync(ts->input);
+                               break;
+                       }
+               }
+
+               ret = auo_pixcir_collect_data(ts, point);
+               if (ret < 0) {
+                       /* we want to loop only in touch_ind_mode */
+                       if (!ts->touch_ind_mode)
+                               break;
+
+                       wait_event_timeout(ts->wait, ts->stopped,
+                               msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+                       continue;
+               }
+
+               for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+                       if (point[i].coord_x > 0 || point[i].coord_y > 0) {
+                               input_report_abs(ts->input, ABS_MT_POSITION_X,
+                                                point[i].coord_x);
+                               input_report_abs(ts->input, ABS_MT_POSITION_Y,
+                                                point[i].coord_y);
+                               input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+                                                point[i].area_major);
+                               input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+                                                point[i].area_minor);
+                               input_report_abs(ts->input, ABS_MT_ORIENTATION,
+                                                point[i].orientation);
+                               input_mt_sync(ts->input);
+
+                               /* use first finger as source for singletouch */
+                               if (fingers == 0)
+                                       abs = i;
+
+                               /* number of touch points could also be queried
+                                * via i2c but would require an additional call
+                                */
+                               fingers++;
+                       }
+               }
+
+               input_report_key(ts->input, BTN_TOUCH, fingers > 0);
+
+               if (abs > -1) {
+                       input_report_abs(ts->input, ABS_X, point[abs].coord_x);
+                       input_report_abs(ts->input, ABS_Y, point[abs].coord_y);
+               }
+
+               input_sync(ts->input);
+
+               /* we want to loop only in touch_ind_mode */
+               if (!ts->touch_ind_mode)
+                       break;
+
+               wait_event_timeout(ts->wait, ts->stopped,
+                                msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Set the power mode of the device.
+ * Valid modes are
+ * - AUO_PIXCIR_POWER_ACTIVE
+ * - AUO_PIXCIR_POWER_SLEEP - automatically left on first touch
+ * - AUO_PIXCIR_POWER_DEEP_SLEEP
+ */
+static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
+{
+       struct i2c_client *client = ts->client;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_POWER_MODE);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_POWER_MODE, ret);
+               return ret;
+       }
+
+       ret &= ~AUO_PIXCIR_POWER_MASK;
+       ret |= mode;
+
+       ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_POWER_MODE, ret);
+       if (ret) {
+               dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_POWER_MODE, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+                                          int int_setting)
+{
+       struct i2c_client *client = ts->client;
+       struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_INT_SETTING, ret);
+               return ret;
+       }
+
+       ret &= ~AUO_PIXCIR_INT_MODE_MASK;
+       ret |= int_setting;
+       ret |= AUO_PIXCIR_INT_POL_HIGH; /* always use high for interrupts */
+
+       ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+                                       ret);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_INT_SETTING, ret);
+               return ret;
+       }
+
+       ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+
+       return 0;
+}
+
+/* control the generation of interrupts on the device side */
+static int auo_pixcir_int_toggle(struct auo_pixcir_ts *ts, bool enable)
+{
+       struct i2c_client *client = ts->client;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_INT_SETTING, ret);
+               return ret;
+       }
+
+       if (enable)
+               ret |= AUO_PIXCIR_INT_ENABLE;
+       else
+               ret &= ~AUO_PIXCIR_INT_ENABLE;
+
+       ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+                                       ret);
+       if (ret < 0) {
+               dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+                       AUO_PIXCIR_REG_INT_SETTING, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int auo_pixcir_start(struct auo_pixcir_ts *ts)
+{
+       struct i2c_client *client = ts->client;
+       int ret;
+
+       ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_ACTIVE);
+       if (ret < 0) {
+               dev_err(&client->dev, "could not set power mode, %d\n",
+                       ret);
+               return ret;
+       }
+
+       ts->stopped = false;
+       mb();
+       enable_irq(client->irq);
+
+       ret = auo_pixcir_int_toggle(ts, 1);
+       if (ret < 0) {
+               dev_err(&client->dev, "could not enable interrupt, %d\n",
+                       ret);
+               disable_irq(client->irq);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int auo_pixcir_stop(struct auo_pixcir_ts *ts)
+{
+       struct i2c_client *client = ts->client;
+       int ret;
+
+       ret = auo_pixcir_int_toggle(ts, 0);
+       if (ret < 0) {
+               dev_err(&client->dev, "could not disable interrupt, %d\n",
+                       ret);
+               return ret;
+       }
+
+       /* disable receiving of interrupts */
+       disable_irq(client->irq);
+       ts->stopped = true;
+       mb();
+       wake_up(&ts->wait);
+
+       return auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_DEEP_SLEEP);
+}
+
+static int auo_pixcir_input_open(struct input_dev *dev)
+{
+       struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+       int ret;
+
+       ret = auo_pixcir_start(ts);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void auo_pixcir_input_close(struct input_dev *dev)
+{
+       struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+
+       auo_pixcir_stop(ts);
+
+       return;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int auo_pixcir_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+       struct input_dev *input = ts->input;
+       int ret = 0;
+
+       mutex_lock(&input->mutex);
+
+       /* when configured as wakeup source, device should always wake system
+        * therefore start device if necessary
+        */
+       if (device_may_wakeup(&client->dev)) {
+               /* need to start device if not open, to be wakeup source */
+               if (!input->users) {
+                       ret = auo_pixcir_start(ts);
+                       if (ret)
+                               goto unlock;
+               }
+
+               enable_irq_wake(client->irq);
+               ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_SLEEP);
+       } else if (input->users) {
+               ret = auo_pixcir_stop(ts);
+       }
+
+unlock:
+       mutex_unlock(&input->mutex);
+
+       return ret;
+}
+
+static int auo_pixcir_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+       struct input_dev *input = ts->input;
+       int ret = 0;
+
+       mutex_lock(&input->mutex);
+
+       if (device_may_wakeup(&client->dev)) {
+               disable_irq_wake(client->irq);
+
+               /* need to stop device if it was not open on suspend */
+               if (!input->users) {
+                       ret = auo_pixcir_stop(ts);
+                       if (ret)
+                               goto unlock;
+               }
+
+               /* device wakes automatically from SLEEP */
+       } else if (input->users) {
+               ret = auo_pixcir_start(ts);
+       }
+
+unlock:
+       mutex_unlock(&input->mutex);
+
+       return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
+                        auo_pixcir_resume);
+
+static int __devinit auo_pixcir_probe(struct i2c_client *client,
+                                     const struct i2c_device_id *id)
+{
+       const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+       struct auo_pixcir_ts *ts;
+       struct input_dev *input_dev;
+       int ret;
+
+       if (!pdata)
+               return -EINVAL;
+
+       ts = kzalloc(sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       ret = gpio_request(pdata->gpio_int, "auo_pixcir_ts_int");
+       if (ret) {
+               dev_err(&client->dev, "request of gpio %d failed, %d\n",
+                       pdata->gpio_int, ret);
+               goto err_gpio_int;
+       }
+
+       if (pdata->init_hw)
+               pdata->init_hw(client);
+
+       ts->client = client;
+       ts->touch_ind_mode = 0;
+       init_waitqueue_head(&ts->wait);
+
+       snprintf(ts->phys, sizeof(ts->phys),
+                "%s/input0", dev_name(&client->dev));
+
+       input_dev = input_allocate_device();
+       if (!input_dev) {
+               dev_err(&client->dev, "could not allocate input device\n");
+               goto err_input_alloc;
+       }
+
+       ts->input = input_dev;
+
+       input_dev->name = "AUO-Pixcir touchscreen";
+       input_dev->phys = ts->phys;
+       input_dev->id.bustype = BUS_I2C;
+       input_dev->dev.parent = &client->dev;
+
+       input_dev->open = auo_pixcir_input_open;
+       input_dev->close = auo_pixcir_input_close;
+
+       __set_bit(EV_ABS, input_dev->evbit);
+       __set_bit(EV_KEY, input_dev->evbit);
+
+       __set_bit(BTN_TOUCH, input_dev->keybit);
+
+       /* For single touch */
+       input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
+       input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+
+       /* For multi touch */
+       input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+                            pdata->x_max, 0, 0);
+       input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+                            pdata->y_max, 0, 0);
+       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+                            AUO_PIXCIR_MAX_AREA, 0, 0);
+       input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+                            AUO_PIXCIR_MAX_AREA, 0, 0);
+       input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+       ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_VERSION);
+       if (ret < 0)
+               goto err_fw_vers;
+       dev_info(&client->dev, "firmware version 0x%X\n", ret);
+
+       ret = auo_pixcir_int_config(ts, pdata->int_setting);
+       if (ret)
+               goto err_fw_vers;
+
+       input_set_drvdata(ts->input, ts);
+       ts->stopped = true;
+
+       ret = request_threaded_irq(client->irq, NULL, auo_pixcir_interrupt,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  input_dev->name, ts);
+       if (ret) {
+               dev_err(&client->dev, "irq %d requested failed\n", client->irq);
+               goto err_fw_vers;
+       }
+
+       /* stop device and put it into deep sleep until it is opened */
+       ret = auo_pixcir_stop(ts);
+       if (ret < 0)
+               goto err_input_register;
+
+       ret = input_register_device(input_dev);
+       if (ret) {
+               dev_err(&client->dev, "could not register input device\n");
+               goto err_input_register;
+       }
+
+       i2c_set_clientdata(client, ts);
+
+       return 0;
+
+err_input_register:
+       free_irq(client->irq, ts);
+err_fw_vers:
+       input_free_device(input_dev);
+err_input_alloc:
+       if (pdata->exit_hw)
+               pdata->exit_hw(client);
+       gpio_free(pdata->gpio_int);
+err_gpio_int:
+       kfree(ts);
+
+       return ret;
+}
+
+static int __devexit auo_pixcir_remove(struct i2c_client *client)
+{
+       struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+       const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+
+       free_irq(client->irq, ts);
+
+       input_unregister_device(ts->input);
+
+       if (pdata->exit_hw)
+               pdata->exit_hw(client);
+
+       gpio_free(pdata->gpio_int);
+
+       kfree(ts);
+
+       return 0;
+}
+
+static const struct i2c_device_id auo_pixcir_idtable[] = {
+       { "auo_pixcir_ts", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, auo_pixcir_idtable);
+
+static struct i2c_driver auo_pixcir_driver = {
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "auo_pixcir_ts",
+               .pm     = &auo_pixcir_pm_ops,
+       },
+       .probe          = auo_pixcir_probe,
+       .remove         = __devexit_p(auo_pixcir_remove),
+       .id_table       = auo_pixcir_idtable,
+};
+
+static int __init auo_pixcir_init(void)
+{
+       return i2c_add_driver(&auo_pixcir_driver);
+}
+module_init(auo_pixcir_init);
+
+static void __exit auo_pixcir_exit(void)
+{
+       i2c_del_driver(&auo_pixcir_driver);
+}
+module_exit(auo_pixcir_exit);
+
+MODULE_DESCRIPTION("AUO-PIXCIR touchscreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
index 2b72a5923c16a41f9cfbd1400f958f736842fd6f..36b65cf10d7f83b25a34b2148986c3a27b7c4bfc 100644 (file)
@@ -379,18 +379,7 @@ static struct platform_driver da9034_touch_driver = {
        .probe          = da9034_touch_probe,
        .remove         = __devexit_p(da9034_touch_remove),
 };
-
-static int __init da9034_touch_init(void)
-{
-       return platform_driver_register(&da9034_touch_driver);
-}
-module_init(da9034_touch_init);
-
-static void __exit da9034_touch_exit(void)
-{
-       platform_driver_unregister(&da9034_touch_driver);
-}
-module_exit(da9034_touch_exit);
+module_platform_driver(da9034_touch_driver);
 
 MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
 MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
new file mode 100644 (file)
index 0000000..eadcc2e
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Driver for EETI eGalax Multiple Touch Controller
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * based on max11801_ts.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The EETI eGalax serial touch screen controller is an I2C-based
+ * multi-touch controller; it supports up to 5 simultaneous touch points. */
+
+/* TODO:
+  - auto idle mode support
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/input/mt.h>
+
+/*
+ * Mouse Mode: some panel may configure the controller to mouse mode,
+ * which can only report one point at a given time.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_MOUSE              0x1
+/*
+ * Vendor Mode: this mode is used to transfer some vendor specific
+ * messages.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_VENDOR             0x3
+/* Multiple Touch Mode */
+#define REPORT_MODE_MTTOUCH            0x4
+
+#define MAX_SUPPORT_POINTS             5
+
+#define EVENT_VALID_OFFSET     7
+#define EVENT_VALID_MASK       (0x1 << EVENT_VALID_OFFSET)
+#define EVENT_ID_OFFSET                2
+#define EVENT_ID_MASK          (0xf << EVENT_ID_OFFSET)
+#define EVENT_IN_RANGE         (0x1 << 1)
+#define EVENT_DOWN_UP          (0X1 << 0)
+
+#define MAX_I2C_DATA_LEN       10
+
+#define EGALAX_MAX_X   32760
+#define EGALAX_MAX_Y   32760
+#define EGALAX_MAX_TRIES 100
+
+/* Per-device driver state, allocated in probe. */
+struct egalax_ts {
+       struct i2c_client               *client;
+       struct input_dev                *input_dev;
+};
+
+/*
+ * Threaded IRQ handler: read one event packet from the controller over
+ * I2C and feed it to the input core.
+ *
+ * Packet layout in MT mode: buf[0] report mode, buf[1] state bits
+ * (valid / contact id / down-up), buf[2..7] little-endian X, Y, Z.
+ */
+static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
+{
+       struct egalax_ts *ts = dev_id;
+       struct input_dev *input_dev = ts->input_dev;
+       struct i2c_client *client = ts->client;
+       u8 buf[MAX_I2C_DATA_LEN];
+       int id, ret, x, y, z;
+       int tries = 0;
+       bool down, valid;
+       u8 state;
+
+       /* the controller may NAK while still waking up, so retry a bit. */
+       do {
+               ret = i2c_master_recv(client, buf, MAX_I2C_DATA_LEN);
+       } while (ret == -EAGAIN && tries++ < EGALAX_MAX_TRIES);
+
+       if (ret < 0)
+               return IRQ_HANDLED;
+
+       if (buf[0] != REPORT_MODE_MTTOUCH) {
+               /* ignore mouse events and vendor events */
+               return IRQ_HANDLED;
+       }
+
+       state = buf[1];
+       x = (buf[3] << 8) | buf[2];
+       y = (buf[5] << 8) | buf[4];
+       z = (buf[7] << 8) | buf[6];
+
+       valid = state & EVENT_VALID_MASK;
+       id = (state & EVENT_ID_MASK) >> EVENT_ID_OFFSET;
+       down = state & EVENT_DOWN_UP;
+
+       /*
+        * Bug fix: slots are allocated with
+        * input_mt_init_slots(MAX_SUPPORT_POINTS), so valid contact ids
+        * are 0 .. MAX_SUPPORT_POINTS - 1.  The original ">" test was off
+        * by one and let id == MAX_SUPPORT_POINTS index past the slot
+        * array inside the input core.
+        */
+       if (!valid || id >= MAX_SUPPORT_POINTS) {
+               dev_dbg(&client->dev, "point invalid\n");
+               return IRQ_HANDLED;
+       }
+
+       input_mt_slot(input_dev, id);
+       input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
+
+       dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d",
+               down ? "down" : "up", id, x, y, z);
+
+       if (down) {
+               input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+               input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+               input_report_abs(input_dev, ABS_MT_PRESSURE, z);
+       }
+
+       /* derive single-touch/BTN_TOUCH emulation from the MT state. */
+       input_mt_report_pointer_emulation(input_dev, true);
+       input_sync(input_dev);
+
+       return IRQ_HANDLED;
+}
+
+/* Wake up the controller with a falling edge on the interrupt GPIO. */
+static int egalax_wake_up_device(struct i2c_client *client)
+{
+       /* the IRQ line doubles as the wake-up pin, so borrow its GPIO. */
+       int gpio = irq_to_gpio(client->irq);
+       int ret;
+
+       ret = gpio_request(gpio, "egalax_irq");
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "request gpio failed, cannot wake up controller: %d\n",
+                       ret);
+               return ret;
+       }
+
+       /* drive the line low (the falling edge), then release it high. */
+       gpio_direction_output(gpio, 0);
+       gpio_set_value(gpio, 1);
+
+       /* controller should be awake now; give the line back to the IRQ. */
+       gpio_direction_input(gpio);
+       gpio_free(gpio);
+
+       return 0;
+}
+
+/*
+ * Ask the controller for its firmware version.  The response itself is
+ * not read; a successful send is what the caller cares about, so this
+ * doubles as a presence check.
+ */
+static int __devinit egalax_firmware_version(struct i2c_client *client)
+{
+       static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
+       int ret = i2c_master_send(client, cmd, MAX_I2C_DATA_LEN);
+
+       return ret < 0 ? ret : 0;
+}
+
+/*
+ * Probe: allocate driver state and the input device, wake the
+ * controller, verify it responds, configure the reported axes and
+ * register the MT-capable input device plus its threaded IRQ handler.
+ */
+static int __devinit egalax_ts_probe(struct i2c_client *client,
+                                      const struct i2c_device_id *id)
+{
+       struct egalax_ts *ts;
+       struct input_dev *input_dev;
+       int error;
+
+       ts = kzalloc(sizeof(struct egalax_ts), GFP_KERNEL);
+       if (!ts) {
+               dev_err(&client->dev, "Failed to allocate memory\n");
+               return -ENOMEM;
+       }
+
+       input_dev = input_allocate_device();
+       if (!input_dev) {
+               dev_err(&client->dev, "Failed to allocate memory\n");
+               error = -ENOMEM;
+               goto err_free_ts;
+       }
+
+       ts->client = client;
+       ts->input_dev = input_dev;
+
+       /* controller may be in sleep, wake it up. */
+       egalax_wake_up_device(client);
+
+       /* the version query doubles as an "is the controller there" check. */
+       error = egalax_firmware_version(client);
+       if (error < 0) {
+               dev_err(&client->dev, "Failed to read firmware version\n");
+               error = -EIO;
+               goto err_free_dev;
+       }
+
+       input_dev->name = "EETI eGalax Touch Screen";
+       input_dev->id.bustype = BUS_I2C;
+       input_dev->dev.parent = &client->dev;
+
+       __set_bit(EV_ABS, input_dev->evbit);
+       __set_bit(EV_KEY, input_dev->evbit);
+       __set_bit(BTN_TOUCH, input_dev->keybit);
+
+       input_set_abs_params(input_dev, ABS_X, 0, EGALAX_MAX_X, 0, 0);
+       input_set_abs_params(input_dev, ABS_Y, 0, EGALAX_MAX_Y, 0, 0);
+       input_set_abs_params(input_dev,
+                            ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
+       /*
+        * Bug fix: the second MT axis must be ABS_MT_POSITION_Y (it
+        * already passed EGALAX_MAX_Y); the original code set
+        * ABS_MT_POSITION_X twice and never configured the Y axis.
+        */
+       input_set_abs_params(input_dev,
+                            ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
+       input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS);
+
+       input_set_drvdata(input_dev, ts);
+
+       error = request_threaded_irq(client->irq, NULL, egalax_ts_interrupt,
+                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                    "egalax_ts", ts);
+       if (error < 0) {
+               dev_err(&client->dev, "Failed to register interrupt\n");
+               goto err_free_dev;
+       }
+
+       error = input_register_device(ts->input_dev);
+       if (error)
+               goto err_free_irq;
+
+       i2c_set_clientdata(client, ts);
+       return 0;
+
+err_free_irq:
+       free_irq(client->irq, ts);
+err_free_dev:
+       input_free_device(input_dev);
+err_free_ts:
+       kfree(ts);
+
+       return error;
+}
+
+static __devexit int egalax_ts_remove(struct i2c_client *client)
+{
+       struct egalax_ts *ts = i2c_get_clientdata(client);
+
+       /* free the IRQ first so the handler cannot touch input_dev below. */
+       free_irq(client->irq, ts);
+
+       input_unregister_device(ts->input_dev);
+       kfree(ts);
+
+       return 0;
+}
+
+/* I2C device ids this driver binds to. */
+static const struct i2c_device_id egalax_ts_id[] = {
+       { "egalax_ts", 0 },
+       { }
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int egalax_ts_suspend(struct device *dev)
+{
+       /* vendor-defined sleep command — presumably puts the controller
+        * into its low-power state; TODO confirm against the datasheet. */
+       static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
+               0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
+       };
+       struct i2c_client *client = to_i2c_client(dev);
+       int ret;
+
+       ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
+       /* i2c_master_send() returns the byte count on success. */
+       return ret > 0 ? 0 : ret;
+}
+
+static int egalax_ts_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       /* the wake-up GPIO pulse doubles as the resume sequence. */
+       return egalax_wake_up_device(client);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+
+/* Hook the driver into the I2C core; PM is routed through dev_pm_ops. */
+static struct i2c_driver egalax_ts_driver = {
+       .driver = {
+               .name   = "egalax_ts",
+               .owner  = THIS_MODULE,
+               .pm     = &egalax_ts_pm_ops,
+       },
+       .id_table       = egalax_ts_id,
+       .probe          = egalax_ts_probe,
+       .remove         = __devexit_p(egalax_ts_remove),
+};
+
+/* Standard module registration boilerplate. */
+static int __init egalax_ts_init(void)
+{
+       return i2c_add_driver(&egalax_ts_driver);
+}
+
+static void __exit egalax_ts_exit(void)
+{
+       i2c_del_driver(&egalax_ts_driver);
+}
+
+module_init(egalax_ts_init);
+module_exit(egalax_ts_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Touchscreen driver for EETI eGalax touch controller");
+MODULE_LICENSE("GPL");
index 62811de6f18f0856b46e992ac8dd8e035398e32d..81e33862394443261d3958ae60d17bb2ec98fbc9 100644 (file)
@@ -47,12 +47,6 @@ static int invert_y;
 module_param(invert_y, bool, 0644);
 MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
 
-static struct pnp_device_id pnp_ids[] = {
-       { .id = "PNP0cc0" },
-       { .id = "" }
-};
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
-
 static irqreturn_t htcpen_interrupt(int irq, void *handle)
 {
        struct input_dev *htcpen_dev = handle;
@@ -237,6 +231,7 @@ static struct dmi_system_id __initdata htcshift_dmi_table[] = {
        },
        { }
 };
+MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
 
 static int __init htcpen_isa_init(void)
 {
index 327695268e06ed2413ddbb8e02e4079ed3eee231..3cd7a837f82b203f41f5ff1aa7cefcf5b3714594 100644 (file)
@@ -664,18 +664,7 @@ static struct platform_driver mrstouch_driver = {
        .probe          = mrstouch_probe,
        .remove         = __devexit_p(mrstouch_remove),
 };
-
-static int __init mrstouch_init(void)
-{
-       return platform_driver_register(&mrstouch_driver);
-}
-module_init(mrstouch_init);
-
-static void __exit mrstouch_exit(void)
-{
-       platform_driver_unregister(&mrstouch_driver);
-}
-module_exit(mrstouch_exit);
+module_platform_driver(mrstouch_driver);
 
 MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
 MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
index 50076c2d59e23467e2be9bd1da2f00b7d59db2b5..c3848ad2325bcbe48d6a244d374ffd08cc70d8a9 100644 (file)
@@ -172,16 +172,4 @@ static struct platform_driver jornada720_ts_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init jornada720_ts_init(void)
-{
-       return platform_driver_register(&jornada720_ts_driver);
-}
-
-static void __exit jornada720_ts_exit(void)
-{
-       platform_driver_unregister(&jornada720_ts_driver);
-}
-
-module_init(jornada720_ts_init);
-module_exit(jornada720_ts_exit);
+module_platform_driver(jornada720_ts_driver);
index 0a484ed5295c282f03dda9cf3f3e1810fb440537..afcd0691ec678a03de9a5f36578284574f05424a 100644 (file)
@@ -392,18 +392,7 @@ static struct platform_driver lpc32xx_ts_driver = {
                .pm     = LPC32XX_TS_PM_OPS,
        },
 };
-
-static int __init lpc32xx_ts_init(void)
-{
-       return platform_driver_register(&lpc32xx_ts_driver);
-}
-module_init(lpc32xx_ts_init);
-
-static void __exit lpc32xx_ts_exit(void)
-{
-       platform_driver_unregister(&lpc32xx_ts_driver);
-}
-module_exit(lpc32xx_ts_exit);
+module_platform_driver(lpc32xx_ts_driver);
 
 MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com");
 MODULE_DESCRIPTION("LPC32XX TSC Driver");
index e966c29ff1bb1f81e035eaeb8a5a4144828119aa..7d2b2136e5ad89f1a63a1ffce345f2c227096a71 100644 (file)
@@ -302,19 +302,7 @@ static struct platform_driver mainstone_wm97xx_driver = {
                .name = "wm97xx-touch",
        },
 };
-
-static int __init mainstone_wm97xx_init(void)
-{
-       return platform_driver_register(&mainstone_wm97xx_driver);
-}
-
-static void __exit mainstone_wm97xx_exit(void)
-{
-       platform_driver_unregister(&mainstone_wm97xx_driver);
-}
-
-module_init(mainstone_wm97xx_init);
-module_exit(mainstone_wm97xx_exit);
+module_platform_driver(mainstone_wm97xx_driver);
 
 /* Module information */
 MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
index ede02743eac1997b426a7970f96429a486ea4059..68f86f7dabbce712f80ebe3c6322f008bf934ea2 100644 (file)
@@ -240,18 +240,7 @@ static struct platform_driver mc13783_ts_driver = {
                .name   = MC13783_TS_NAME,
        },
 };
-
-static int __init mc13783_ts_init(void)
-{
-       return platform_driver_probe(&mc13783_ts_driver, &mc13783_ts_probe);
-}
-module_init(mc13783_ts_init);
-
-static void __exit mc13783_ts_exit(void)
-{
-       platform_driver_unregister(&mc13783_ts_driver);
-}
-module_exit(mc13783_ts_exit);
+module_platform_driver(mc13783_ts_driver);
 
 MODULE_DESCRIPTION("MC13783 input touchscreen driver");
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
index 5803bd0c1cca9efc24f0529d700b983243609817..5226194aa78ef678aa1adedd8d08013194ff16b3 100644 (file)
@@ -36,7 +36,6 @@
 struct migor_ts_priv {
        struct i2c_client *client;
        struct input_dev *input;
-       struct delayed_work work;
        int irq;
 };
 
@@ -44,15 +43,24 @@ static const u_int8_t migor_ts_ena_seq[17] = { 0x33, 0x22, 0x11,
                                               0x01, 0x06, 0x07, };
 static const u_int8_t migor_ts_dis_seq[17] = { };
 
-static void migor_ts_poscheck(struct work_struct *work)
+static irqreturn_t migor_ts_isr(int irq, void *dev_id)
 {
-       struct migor_ts_priv *priv = container_of(work,
-                                                 struct migor_ts_priv,
-                                                 work.work);
+       struct migor_ts_priv *priv = dev_id;
        unsigned short xpos, ypos;
        unsigned char event;
        u_int8_t buf[16];
 
+       /*
+        * The touch screen controller chip is hooked up to the CPU
+        * using I2C and a single interrupt line. The interrupt line
+        * is pulled low whenever someone taps the screen. To deassert
+        * the interrupt line we need to acknowledge the interrupt by
+        * communicating with the controller over the slow i2c bus.
+        *
+        * Since I2C bus controller may sleep we are using threaded
+        * IRQ here.
+        */
+
        memset(buf, 0, sizeof(buf));
 
        /* Set Index 0 */
@@ -72,41 +80,25 @@ static void migor_ts_poscheck(struct work_struct *work)
        xpos = ((buf[11] & 0x03) << 8 | buf[10]);
        event = buf[12];
 
-       if (event == EVENT_PENDOWN || event == EVENT_REPEAT) {
+       switch (event) {
+       case EVENT_PENDOWN:
+       case EVENT_REPEAT:
                input_report_key(priv->input, BTN_TOUCH, 1);
                input_report_abs(priv->input, ABS_X, ypos); /*X-Y swap*/
                input_report_abs(priv->input, ABS_Y, xpos);
                input_sync(priv->input);
-       } else if (event == EVENT_PENUP) {
+               break;
+
+       case EVENT_PENUP:
                input_report_key(priv->input, BTN_TOUCH, 0);
                input_sync(priv->input);
+               break;
        }
- out:
-       enable_irq(priv->irq);
-}
-
-static irqreturn_t migor_ts_isr(int irq, void *dev_id)
-{
-       struct migor_ts_priv *priv = dev_id;
-
-       /* the touch screen controller chip is hooked up to the cpu
-        * using i2c and a single interrupt line. the interrupt line
-        * is pulled low whenever someone taps the screen. to deassert
-        * the interrupt line we need to acknowledge the interrupt by
-        * communicating with the controller over the slow i2c bus.
-        *
-        * we can't acknowledge from interrupt context since the i2c
-        * bus controller may sleep, so we just disable the interrupt
-        * here and handle the acknowledge using delayed work.
-        */
-
-       disable_irq_nosync(irq);
-       schedule_delayed_work(&priv->work, HZ / 20);
 
+ out:
        return IRQ_HANDLED;
 }
 
-
 static int migor_ts_open(struct input_dev *dev)
 {
        struct migor_ts_priv *priv = input_get_drvdata(dev);
@@ -131,15 +123,6 @@ static void migor_ts_close(struct input_dev *dev)
 
        disable_irq(priv->irq);
 
-       /* cancel pending work and wait for migor_ts_poscheck() to finish */
-       if (cancel_delayed_work_sync(&priv->work)) {
-               /*
-                * if migor_ts_poscheck was canceled we need to enable IRQ
-                * here to balance disable done in migor_ts_isr.
-                */
-               enable_irq(priv->irq);
-       }
-
        /* disable controller */
        i2c_master_send(client, migor_ts_dis_seq, sizeof(migor_ts_dis_seq));
 
@@ -154,23 +137,20 @@ static int migor_ts_probe(struct i2c_client *client,
        int error;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               dev_err(&client->dev, "failed to allocate driver data\n");
-               error = -ENOMEM;
-               goto err0;
-       }
-
-       dev_set_drvdata(&client->dev, priv);
-
        input = input_allocate_device();
-       if (!input) {
-               dev_err(&client->dev, "Failed to allocate input device.\n");
+       if (!priv || !input) {
+               dev_err(&client->dev, "failed to allocate memory\n");
                error = -ENOMEM;
-               goto err1;
+               goto err_free_mem;
        }
 
+       priv->client = client;
+       priv->input = input;
+       priv->irq = client->irq;
+
        input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-       input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+       __set_bit(BTN_TOUCH, input->keybit);
 
        input_set_abs_params(input, ABS_X, 95, 955, 0, 0);
        input_set_abs_params(input, ABS_Y, 85, 935, 0, 0);
@@ -184,39 +164,34 @@ static int migor_ts_probe(struct i2c_client *client,
 
        input_set_drvdata(input, priv);
 
-       priv->client = client;
-       priv->input = input;
-       INIT_DELAYED_WORK(&priv->work, migor_ts_poscheck);
-       priv->irq = client->irq;
-
-       error = input_register_device(input);
-       if (error)
-               goto err1;
-
-       error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW,
-                           client->name, priv);
+       error = request_threaded_irq(priv->irq, NULL, migor_ts_isr,
+                                     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                     client->name, priv);
        if (error) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
-               goto err2;
+               goto err_free_mem;
        }
 
+       error = input_register_device(input);
+       if (error)
+               goto err_free_irq;
+
+       i2c_set_clientdata(client, priv);
        device_init_wakeup(&client->dev, 1);
+
        return 0;
 
- err2:
-       input_unregister_device(input);
-       input = NULL; /* so we dont try to free it below */
- err1:
+ err_free_irq:
+       free_irq(priv->irq, priv);
+ err_free_mem:
        input_free_device(input);
        kfree(priv);
- err0:
-       dev_set_drvdata(&client->dev, NULL);
        return error;
 }
 
 static int migor_ts_remove(struct i2c_client *client)
 {
-       struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+       struct migor_ts_priv *priv = i2c_get_clientdata(client);
 
        free_irq(priv->irq, priv);
        input_unregister_device(priv->input);
@@ -230,7 +205,7 @@ static int migor_ts_remove(struct i2c_client *client)
 static int migor_ts_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+       struct migor_ts_priv *priv = i2c_get_clientdata(client);
 
        if (device_may_wakeup(&client->dev))
                enable_irq_wake(priv->irq);
@@ -241,7 +216,7 @@ static int migor_ts_suspend(struct device *dev)
 static int migor_ts_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+       struct migor_ts_priv *priv = i2c_get_clientdata(client);
 
        if (device_may_wakeup(&client->dev))
                disable_irq_wake(priv->irq);
index ea6ef16e59b428e9b7338ce57bde07d51bbb44e0..f57aeb80f7e3ac4f097428c8355fb9ed65999e5e 100644 (file)
@@ -252,19 +252,7 @@ static struct platform_driver pcap_ts_driver = {
                .pm     = PCAP_TS_PM_OPS,
        },
 };
-
-static int __init pcap_ts_init(void)
-{
-       return platform_driver_register(&pcap_ts_driver);
-}
-
-static void __exit pcap_ts_exit(void)
-{
-       platform_driver_unregister(&pcap_ts_driver);
-}
-
-module_init(pcap_ts_init);
-module_exit(pcap_ts_exit);
+module_platform_driver(pcap_ts_driver);
 
 MODULE_DESCRIPTION("Motorola PCAP2 touchscreen driver");
 MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
new file mode 100644 (file)
index 0000000..d5ac09a
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Driver for Pixcir I2C touchscreen controllers.
+ *
+ * Copyright (C) 2010-2011 Pixcir, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/pixcir_ts.h>
+
+/* Per-device driver state, allocated in probe. */
+struct pixcir_i2c_ts_data {
+       struct i2c_client *client;
+       struct input_dev *input;
+       const struct pixcir_ts_platform_data *chip;
+       bool exiting;           /* set at remove time to stop the ISR loop */
+};
+
+/* Read one report from the controller and translate it into input events. */
+static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *tsdata)
+{
+       u8 rdbuf[10], wrbuf[1] = { 0 };
+       u8 touch;
+       int ret;
+
+       /* select register 0, then fetch the 10-byte report. */
+       ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf));
+       if (ret != sizeof(wrbuf)) {
+               dev_err(&tsdata->client->dev,
+                       "%s: i2c_master_send failed(), ret=%d\n",
+                       __func__, ret);
+               return;
+       }
+
+       ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf));
+       if (ret != sizeof(rdbuf)) {
+               dev_err(&tsdata->client->dev,
+                       "%s: i2c_master_recv failed(), ret=%d\n",
+                       __func__, ret);
+               return;
+       }
+
+       touch = rdbuf[0];
+       if (!touch) {
+               /* no finger down: just release BTN_TOUCH. */
+               input_report_key(tsdata->input, BTN_TOUCH, 0);
+       } else {
+               u16 posx1 = (rdbuf[3] << 8) | rdbuf[2];
+               u16 posy1 = (rdbuf[5] << 8) | rdbuf[4];
+
+               /* single-touch emulation tracks the first contact. */
+               input_report_key(tsdata->input, BTN_TOUCH, 1);
+               input_report_abs(tsdata->input, ABS_X, posx1);
+               input_report_abs(tsdata->input, ABS_Y, posy1);
+
+               input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1);
+               input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1);
+               input_mt_sync(tsdata->input);
+
+               if (touch == 2) {
+                       u16 posx2 = (rdbuf[7] << 8) | rdbuf[6];
+                       u16 posy2 = (rdbuf[9] << 8) | rdbuf[8];
+
+                       input_report_abs(tsdata->input,
+                                        ABS_MT_POSITION_X, posx2);
+                       input_report_abs(tsdata->input,
+                                        ABS_MT_POSITION_Y, posy2);
+                       input_mt_sync(tsdata->input);
+               }
+       }
+
+       input_sync(tsdata->input);
+}
+
+/*
+ * Threaded IRQ handler.  Keep polling the controller while the touch is
+ * active: attb_read_val() is a platform hook that presumably samples the
+ * chip's ATTB line (high when no finger is down — confirm with the board
+ * code).  The loop also terminates when the driver is being removed
+ * (tsdata->exiting is set in pixcir_i2c_ts_remove() before free_irq()).
+ */
+static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
+{
+       struct pixcir_i2c_ts_data *tsdata = dev_id;
+
+       while (!tsdata->exiting) {
+               pixcir_ts_poscheck(tsdata);
+
+               if (tsdata->chip->attb_read_val())
+                       break;
+
+               /* throttle polling to ~50 Hz while the finger is down. */
+               msleep(20);
+       }
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Keep the touch IRQ armed as a wake-up source across suspend if allowed. */
+static int pixcir_i2c_ts_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (device_may_wakeup(&client->dev))
+               enable_irq_wake(client->irq);
+
+       return 0;
+}
+
+static int pixcir_i2c_ts_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (device_may_wakeup(&client->dev))
+               disable_irq_wake(client->irq);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
+                        pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
+
+/*
+ * Probe: allocate state and the input device, configure the ST and MT
+ * axes from platform data, request the threaded IRQ and register the
+ * device.
+ */
+static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+                                        const struct i2c_device_id *id)
+{
+       const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+       struct pixcir_i2c_ts_data *tsdata;
+       struct input_dev *input;
+       int error;
+
+       if (!pdata) {
+               dev_err(&client->dev, "platform data not defined\n");
+               return -EINVAL;
+       }
+
+       tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
+       input = input_allocate_device();
+       if (!tsdata || !input) {
+               dev_err(&client->dev, "Failed to allocate driver data!\n");
+               error = -ENOMEM;
+               goto err_free_mem;
+       }
+
+       tsdata->client = client;
+       tsdata->input = input;
+       tsdata->chip = pdata;
+
+       input->name = client->name;
+       input->id.bustype = BUS_I2C;
+       input->dev.parent = &client->dev;
+
+       __set_bit(EV_KEY, input->evbit);
+       __set_bit(EV_ABS, input->evbit);
+       __set_bit(BTN_TOUCH, input->keybit);
+       input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
+       input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+
+       input_set_drvdata(input, tsdata);
+
+       /*
+        * Bug fix: a threaded IRQ with no primary handler must be
+        * requested with IRQF_ONESHOT, otherwise the genirq core rejects
+        * the request with -EINVAL and the probe can never succeed.
+        */
+       error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                    client->name, tsdata);
+       if (error) {
+               dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+               goto err_free_mem;
+       }
+
+       error = input_register_device(input);
+       if (error)
+               goto err_free_irq;
+
+       i2c_set_clientdata(client, tsdata);
+       device_init_wakeup(&client->dev, 1);
+
+       return 0;
+
+err_free_irq:
+       free_irq(client->irq, tsdata);
+err_free_mem:
+       input_free_device(input);
+       kfree(tsdata);
+       return error;
+}
+
+static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+{
+       struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
+
+       device_init_wakeup(&client->dev, 0);
+
+       /*
+        * Make the ISR poll loop terminate: set the flag, make the store
+        * visible, then let free_irq() wait for the handler to finish.
+        */
+       tsdata->exiting = true;
+       mb();
+       free_irq(client->irq, tsdata);
+
+       input_unregister_device(tsdata->input);
+       kfree(tsdata);
+
+       return 0;
+}
+
+/* I2C device ids this driver binds to. */
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+       { "pixcir_ts", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+/* Hook the driver into the I2C core; PM is routed through dev_pm_ops. */
+static struct i2c_driver pixcir_i2c_ts_driver = {
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "pixcir_ts",
+               .pm     = &pixcir_dev_pm_ops,
+       },
+       .probe          = pixcir_i2c_ts_probe,
+       .remove         = __devexit_p(pixcir_i2c_ts_remove),
+       .id_table       = pixcir_i2c_ts_id,
+};
+
+/* Standard module registration boilerplate. */
+static int __init pixcir_i2c_ts_init(void)
+{
+       return i2c_add_driver(&pixcir_i2c_ts_driver);
+}
+module_init(pixcir_i2c_ts_init);
+
+static void __exit pixcir_i2c_ts_exit(void)
+{
+       i2c_del_driver(&pixcir_i2c_ts_driver);
+}
+module_exit(pixcir_i2c_ts_exit);
+
+MODULE_AUTHOR("Jianchun Bian <jcbian@pixcir.com.cn>, Dequan Meng <dqmeng@pixcir.com.cn>");
+MODULE_DESCRIPTION("Pixcir I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
index 64ce697a34562b0f129b8859e8d95c7f7d96fd8b..bf1a06400067b10dea6b24fc10b5c303e2459b28 100644 (file)
@@ -432,19 +432,7 @@ static struct platform_driver s3c_ts_driver = {
        .probe          = s3c2410ts_probe,
        .remove         = __devexit_p(s3c2410ts_remove),
 };
-
-static int __init s3c2410ts_init(void)
-{
-       return platform_driver_register(&s3c_ts_driver);
-}
-
-static void __exit s3c2410ts_exit(void)
-{
-       platform_driver_unregister(&s3c_ts_driver);
-}
-
-module_init(s3c2410ts_init);
-module_exit(s3c2410ts_exit);
+module_platform_driver(s3c_ts_driver);
 
 MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, "
              "Ben Dooks <ben@simtec.co.uk>, "
index ae88e13c99ff5194f88bb48f0096daf820c9bf47..692b685720ce9e8c238c8f1a08f67204214ab6d0 100644 (file)
@@ -379,20 +379,7 @@ static struct platform_driver stmpe_ts_driver = {
        .probe = stmpe_input_probe,
        .remove = __devexit_p(stmpe_ts_remove),
 };
-
-static int __init stmpe_ts_init(void)
-{
-       return platform_driver_register(&stmpe_ts_driver);
-}
-
-module_init(stmpe_ts_init);
-
-static void __exit stmpe_ts_exit(void)
-{
-       platform_driver_unregister(&stmpe_ts_driver);
-}
-
-module_exit(stmpe_ts_exit);
+module_platform_driver(stmpe_ts_driver);
 
 MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
 MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
index 0e8f63e5b36ffbd3ee99e1a751dff2d4400e06e7..7e74880973591bb61daa6e3317a94e43bb8f5415 100644 (file)
@@ -378,19 +378,7 @@ static struct platform_driver tsc_driver = {
        .driver.name    = "tnetv107x-ts",
        .driver.owner   = THIS_MODULE,
 };
-
-static int __init tsc_init(void)
-{
-       return platform_driver_register(&tsc_driver);
-}
-
-static void __exit tsc_exit(void)
-{
-       platform_driver_unregister(&tsc_driver);
-}
-
-module_init(tsc_init);
-module_exit(tsc_exit);
+module_platform_driver(tsc_driver);
 
 MODULE_AUTHOR("Cyril Chemparathy");
 MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
index 43031492d733a4c51b1888481b93265b2d13e881..6c6f6d8ea9b413d68b97b47c72e6d1e192e70e05 100644 (file)
@@ -371,18 +371,7 @@ static struct platform_driver tps6507x_ts_driver = {
        .probe = tps6507x_ts_probe,
        .remove = __devexit_p(tps6507x_ts_remove),
 };
-
-static int __init tps6507x_ts_init(void)
-{
-       return platform_driver_register(&tps6507x_ts_driver);
-}
-module_init(tps6507x_ts_init);
-
-static void __exit tps6507x_ts_exit(void)
-{
-       platform_driver_unregister(&tps6507x_ts_driver);
-}
-module_exit(tps6507x_ts_exit);
+module_platform_driver(tps6507x_ts_driver);
 
 MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
 MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
index 3b5b5df04dd686ec77b20d32c16141ce44bc0865..d2b57536feeabd776e105df75433e9296444a398 100644 (file)
 
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/completion.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
 #include <linux/input.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
-#include <linux/suspend.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/ucb1400.h>
 
+#define UCB1400_TS_POLL_PERIOD 10 /* ms */
+
 static int adcsync;
 static int ts_delay = 55; /* us */
 static int ts_delay_pressure;  /* us */
 
 /* Switch to interrupt mode. */
-static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
+static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
 {
-       ucb1400_reg_write(ac97, UCB_TS_CR,
+       ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
                        UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
                        UCB_TS_CR_MODE_INT);
@@ -47,13 +47,15 @@ static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
  * Switch to pressure mode, and read pressure.  We don't need to wait
  * here, since both plates are being driven.
  */
-static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
 {
        ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
                        UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
                        UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+
        udelay(ts_delay_pressure);
+
        return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
 }
 
@@ -63,7 +65,7 @@ static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
  * gives a faster response time.  Even so, we need to wait about 55us
  * for things to stabilise.
  */
-static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
 {
        ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -86,7 +88,7 @@ static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
  * gives a faster response time.  Even so, we need to wait about 55us
  * for things to stabilise.
  */
-static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
+static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
 {
        ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -107,7 +109,7 @@ static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
  * Switch to X plate resistance mode.  Set MX to ground, PX to
  * supply.  Measure current.
  */
-static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
 {
        ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -119,7 +121,7 @@ static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
  * Switch to Y plate resistance mode.  Set MY to ground, PY to
  * supply.  Measure current.
  */
-static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
 {
        ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
                        UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -127,26 +129,26 @@ static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
        return ucb1400_adc_read(ucb->ac97, 0, adcsync);
 }
 
-static inline int ucb1400_ts_pen_up(struct snd_ac97 *ac97)
+static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
 {
-       unsigned short val = ucb1400_reg_read(ac97, UCB_TS_CR);
+       unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);
 
        return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
 }
 
-static inline void ucb1400_ts_irq_enable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
 {
-       ucb1400_reg_write(ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
-       ucb1400_reg_write(ac97, UCB_IE_CLEAR, 0);
-       ucb1400_reg_write(ac97, UCB_IE_FAL, UCB_IE_TSPX);
+       ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
+       ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
+       ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
 }
 
-static inline void ucb1400_ts_irq_disable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
 {
-       ucb1400_reg_write(ac97, UCB_IE_FAL, 0);
+       ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
 }
 
-static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 y)
+static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
 {
        input_report_abs(idev, ABS_X, x);
        input_report_abs(idev, ABS_Y, y);
@@ -162,7 +164,7 @@ static void ucb1400_ts_event_release(struct input_dev *idev)
        input_sync(idev);
 }
 
-static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
+static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
 {
        unsigned int isr;
 
@@ -171,32 +173,34 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
        ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
 
        if (isr & UCB_IE_TSPX)
-               ucb1400_ts_irq_disable(ucb->ac97);
+               ucb1400_ts_irq_disable(ucb);
        else
-               dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
-       enable_irq(ucb->irq);
+               dev_dbg(&ucb->ts_idev->dev,
+                       "ucb1400: unexpected IE_STATUS = %#x\n", isr);
 }
 
-static int ucb1400_ts_thread(void *_ucb)
+/*
+ * A restriction with interrupts exists when using the ucb1400, as
+ * the codec read/write routines may sleep while waiting for codec
+ * access completion and uses semaphores for access control to the
+ * AC97 bus. Therefore the driver is forced to use threaded interrupt
+ * handler.
+ */
+static irqreturn_t ucb1400_irq(int irqnr, void *devid)
 {
-       struct ucb1400_ts *ucb = _ucb;
-       struct task_struct *tsk = current;
-       int valid = 0;
-       struct sched_param param = { .sched_priority = 1 };
+       struct ucb1400_ts *ucb = devid;
+       unsigned int x, y, p;
+       bool penup;
 
-       sched_setscheduler(tsk, SCHED_FIFO, &param);
+       if (unlikely(irqnr != ucb->irq))
+               return IRQ_NONE;
 
-       set_freezable();
-       while (!kthread_should_stop()) {
-               unsigned int x, y, p;
-               long timeout;
+       ucb1400_clear_pending_irq(ucb);
 
-               ucb->ts_restart = 0;
+       /* Start with a small delay before checking pendown state */
+       msleep(UCB1400_TS_POLL_PERIOD);
 
-               if (ucb->irq_pending) {
-                       ucb->irq_pending = 0;
-                       ucb1400_handle_pending_irq(ucb);
-               }
+       while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
 
                ucb1400_adc_enable(ucb->ac97);
                x = ucb1400_ts_read_xpos(ucb);
@@ -204,91 +208,62 @@ static int ucb1400_ts_thread(void *_ucb)
                p = ucb1400_ts_read_pressure(ucb);
                ucb1400_adc_disable(ucb->ac97);
 
-               /* Switch back to interrupt mode. */
-               ucb1400_ts_mode_int(ucb->ac97);
-
-               msleep(10);
-
-               if (ucb1400_ts_pen_up(ucb->ac97)) {
-                       ucb1400_ts_irq_enable(ucb->ac97);
-
-                       /*
-                        * If we spat out a valid sample set last time,
-                        * spit out a "pen off" sample here.
-                        */
-                       if (valid) {
-                               ucb1400_ts_event_release(ucb->ts_idev);
-                               valid = 0;
-                       }
-
-                       timeout = MAX_SCHEDULE_TIMEOUT;
-               } else {
-                       valid = 1;
-                       ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
-                       timeout = msecs_to_jiffies(10);
-               }
+               ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
 
-               wait_event_freezable_timeout(ucb->ts_wait,
-                       ucb->irq_pending || ucb->ts_restart ||
-                       kthread_should_stop(), timeout);
+               wait_event_timeout(ucb->ts_wait, ucb->stopped,
+                                  msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
        }
 
-       /* Send the "pen off" if we are stopping with the pen still active */
-       if (valid)
-               ucb1400_ts_event_release(ucb->ts_idev);
+       ucb1400_ts_event_release(ucb->ts_idev);
 
-       ucb->ts_task = NULL;
-       return 0;
+       if (!ucb->stopped) {
+               /* Switch back to interrupt mode. */
+               ucb1400_ts_mode_int(ucb);
+               ucb1400_ts_irq_enable(ucb);
+       }
+
+       return IRQ_HANDLED;
 }
 
-/*
- * A restriction with interrupts exists when using the ucb1400, as
- * the codec read/write routines may sleep while waiting for codec
- * access completion and uses semaphores for access control to the
- * AC97 bus.  A complete codec read cycle could take  anywhere from
- * 60 to 100uSec so we *definitely* don't want to spin inside the
- * interrupt handler waiting for codec access.  So, we handle the
- * interrupt by scheduling a RT kernel thread to run in process
- * context instead of interrupt context.
- */
-static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
+static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
 {
-       struct ucb1400_ts *ucb = devid;
+       /* Signal IRQ thread to stop polling and disable the handler. */
+       ucb->stopped = true;
+       mb();
+       wake_up(&ucb->ts_wait);
+       disable_irq(ucb->irq);
 
-       if (irqnr == ucb->irq) {
-               disable_irq_nosync(ucb->irq);
-               ucb->irq_pending = 1;
-               wake_up(&ucb->ts_wait);
-               return IRQ_HANDLED;
-       }
-       return IRQ_NONE;
+       ucb1400_ts_irq_disable(ucb);
+       ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+}
+
+/* Must be called with ts->lock held */
+static void ucb1400_ts_start(struct ucb1400_ts *ucb)
+{
+       /* Tell IRQ thread that it may poll the device. */
+       ucb->stopped = false;
+       mb();
+
+       ucb1400_ts_mode_int(ucb);
+       ucb1400_ts_irq_enable(ucb);
+
+       enable_irq(ucb->irq);
 }
 
 static int ucb1400_ts_open(struct input_dev *idev)
 {
        struct ucb1400_ts *ucb = input_get_drvdata(idev);
-       int ret = 0;
 
-       BUG_ON(ucb->ts_task);
+       ucb1400_ts_start(ucb);
 
-       ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
-       if (IS_ERR(ucb->ts_task)) {
-               ret = PTR_ERR(ucb->ts_task);
-               ucb->ts_task = NULL;
-       }
-
-       return ret;
+       return 0;
 }
 
 static void ucb1400_ts_close(struct input_dev *idev)
 {
        struct ucb1400_ts *ucb = input_get_drvdata(idev);
 
-       if (ucb->ts_task)
-               kthread_stop(ucb->ts_task);
-
-       ucb1400_ts_irq_disable(ucb->ac97);
-       ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+       ucb1400_ts_stop(ucb);
 }
 
 #ifndef NO_IRQ
@@ -299,7 +274,8 @@ static void ucb1400_ts_close(struct input_dev *idev)
  * Try to probe our interrupt, rather than relying on lots of
  * hard-coded machine dependencies.
  */
-static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
+static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+                                          struct platform_device *pdev)
 {
        unsigned long mask, timeout;
 
@@ -321,7 +297,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
                                                UCB_ADC_DAT_VALID)) {
                cpu_relax();
                if (time_after(jiffies, timeout)) {
-                       printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
+                       dev_err(&pdev->dev, "timed out in IRQ probe\n");
                        probe_irq_off(mask);
                        return -ENODEV;
                }
@@ -342,11 +318,11 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
        return 0;
 }
 
-static int ucb1400_ts_probe(struct platform_device *dev)
+static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
 {
+       struct ucb1400_ts *ucb = pdev->dev.platform_data;
        int error, x_res, y_res;
        u16 fcsr;
-       struct ucb1400_ts *ucb = dev->dev.platform_data;
 
        ucb->ts_idev = input_allocate_device();
        if (!ucb->ts_idev) {
@@ -356,27 +332,19 @@ static int ucb1400_ts_probe(struct platform_device *dev)
 
        /* Only in case the IRQ line wasn't supplied, try detecting it */
        if (ucb->irq < 0) {
-               error = ucb1400_ts_detect_irq(ucb);
+               error = ucb1400_ts_detect_irq(ucb, pdev);
                if (error) {
-                       printk(KERN_ERR "UCB1400: IRQ probe failed\n");
+                       dev_err(&pdev->dev, "IRQ probe failed\n");
                        goto err_free_devs;
                }
        }
+       dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq);
 
        init_waitqueue_head(&ucb->ts_wait);
 
-       error = request_irq(ucb->irq, ucb1400_hard_irq, IRQF_TRIGGER_RISING,
-                               "UCB1400", ucb);
-       if (error) {
-               printk(KERN_ERR "ucb1400: unable to grab irq%d: %d\n",
-                               ucb->irq, error);
-               goto err_free_devs;
-       }
-       printk(KERN_DEBUG "UCB1400: found IRQ %d\n", ucb->irq);
-
        input_set_drvdata(ucb->ts_idev, ucb);
 
-       ucb->ts_idev->dev.parent        = &dev->dev;
+       ucb->ts_idev->dev.parent        = &pdev->dev;
        ucb->ts_idev->name              = "UCB1400 touchscreen interface";
        ucb->ts_idev->id.vendor         = ucb1400_reg_read(ucb->ac97,
                                                AC97_VENDOR_ID1);
@@ -398,12 +366,23 @@ static int ucb1400_ts_probe(struct platform_device *dev)
        x_res = ucb1400_ts_read_xres(ucb);
        y_res = ucb1400_ts_read_yres(ucb);
        ucb1400_adc_disable(ucb->ac97);
-       printk(KERN_DEBUG "UCB1400: x/y = %d/%d\n", x_res, y_res);
+       dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);
 
        input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
        input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
        input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);
 
+       ucb1400_ts_stop(ucb);
+
+       error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
+                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                    "UCB1400", ucb);
+       if (error) {
+               dev_err(&pdev->dev,
+                       "unable to grab irq%d: %d\n", ucb->irq, error);
+               goto err_free_devs;
+       }
+
        error = input_register_device(ucb->ts_idev);
        if (error)
                goto err_free_irq;
@@ -416,56 +395,61 @@ err_free_devs:
        input_free_device(ucb->ts_idev);
 err:
        return error;
-
 }
 
-static int ucb1400_ts_remove(struct platform_device *dev)
+static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
 {
-       struct ucb1400_ts *ucb = dev->dev.platform_data;
+       struct ucb1400_ts *ucb = pdev->dev.platform_data;
 
        free_irq(ucb->irq, ucb);
        input_unregister_device(ucb->ts_idev);
+
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int ucb1400_ts_resume(struct platform_device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int ucb1400_ts_suspend(struct device *dev)
 {
-       struct ucb1400_ts *ucb = dev->dev.platform_data;
-
-       if (ucb->ts_task) {
-               /*
-                * Restart the TS thread to ensure the
-                * TS interrupt mode is set up again
-                * after sleep.
-                */
-               ucb->ts_restart = 1;
-               wake_up(&ucb->ts_wait);
-       }
+       struct ucb1400_ts *ucb = dev->platform_data;
+       struct input_dev *idev = ucb->ts_idev;
+
+       mutex_lock(&idev->mutex);
+
+       if (idev->users)
+               ucb1400_ts_start(ucb);
+
+       mutex_unlock(&idev->mutex);
+       return 0;
+}
+
+static int ucb1400_ts_resume(struct device *dev)
+{
+       struct ucb1400_ts *ucb = dev->platform_data;
+       struct input_dev *idev = ucb->ts_idev;
+
+       mutex_lock(&idev->mutex);
+
+       if (idev->users)
+               ucb1400_ts_stop(ucb);
+
+       mutex_unlock(&idev->mutex);
        return 0;
 }
-#else
-#define ucb1400_ts_resume NULL
 #endif
 
+static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
+                        ucb1400_ts_suspend, ucb1400_ts_resume);
+
 static struct platform_driver ucb1400_ts_driver = {
        .probe  = ucb1400_ts_probe,
-       .remove = ucb1400_ts_remove,
-       .resume = ucb1400_ts_resume,
+       .remove = __devexit_p(ucb1400_ts_remove),
        .driver = {
                .name   = "ucb1400_ts",
+               .owner  = THIS_MODULE,
+               .pm     = &ucb1400_ts_pm_ops,
        },
 };
-
-static int __init ucb1400_ts_init(void)
-{
-       return platform_driver_register(&ucb1400_ts_driver);
-}
-
-static void __exit ucb1400_ts_exit(void)
-{
-       platform_driver_unregister(&ucb1400_ts_driver);
-}
+module_platform_driver(ucb1400_ts_driver);
 
 module_param(adcsync, bool, 0444);
 MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");
@@ -479,8 +463,5 @@ MODULE_PARM_DESC(ts_delay_pressure,
                "delay between panel setup and pressure read."
                "  Default = 0us.");
 
-module_init(ucb1400_ts_init);
-module_exit(ucb1400_ts_exit);
-
 MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
 MODULE_LICENSE("GPL");
index e539d92cc62674c6e72beb8d75dbd5f0a26f611c..06cef3ccc63a7d180864c7e99e4f5ad16a8b7cbc 100644 (file)
@@ -16,6 +16,7 @@
  *  - JASTEC USB touch controller/DigiTech DTR-02U
  *  - Zytronic capacitive touchscreen
  *  - NEXIO/iNexio
+ *  - Elo TouchSystems 2700 IntelliTouch
  *
  * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
  * Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -138,6 +139,7 @@ enum {
        DEVTYPE_ZYTRONIC,
        DEVTYPE_TC45USB,
        DEVTYPE_NEXIO,
+       DEVTYPE_ELO,
 };
 
 #define USB_DEVICE_HID_CLASS(vend, prod) \
@@ -239,6 +241,10 @@ static const struct usb_device_id usbtouch_devices[] = {
                .driver_info = DEVTYPE_NEXIO},
 #endif
 
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+       {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
+#endif
+
        {}
 };
 
@@ -944,6 +950,24 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
 #endif
 
 
+/*****************************************************************************
+ * ELO part
+ */
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+
+static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
+{
+       dev->x = (pkt[3] << 8) | pkt[2];
+       dev->y = (pkt[5] << 8) | pkt[4];
+       dev->touch = pkt[6] > 0;
+       dev->press = pkt[6];
+
+       return 1;
+}
+#endif
+
+
 /*****************************************************************************
  * the different device descriptors
  */
@@ -953,6 +977,18 @@ static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
 #endif
 
 static struct usbtouch_device_info usbtouch_dev_info[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+       [DEVTYPE_ELO] = {
+               .min_xc         = 0x0,
+               .max_xc         = 0x0fff,
+               .min_yc         = 0x0,
+               .max_yc         = 0x0fff,
+               .max_press      = 0xff,
+               .rept_size      = 8,
+               .read_data      = elo_read_data,
+       },
+#endif
+
 #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
        [DEVTYPE_EGALAX] = {
                .min_xc         = 0x0,
index 217aa51135c59bda6a9e50498749030c3a593303..9396b21d0e8f953a45e5ac852ec65bbdb590854b 100644 (file)
@@ -331,19 +331,7 @@ static struct platform_driver w90x900ts_driver = {
                .owner  = THIS_MODULE,
        },
 };
-
-static int __init w90x900ts_init(void)
-{
-       return platform_driver_register(&w90x900ts_driver);
-}
-
-static void __exit w90x900ts_exit(void)
-{
-       platform_driver_unregister(&w90x900ts_driver);
-}
-
-module_init(w90x900ts_init);
-module_exit(w90x900ts_exit);
+module_platform_driver(w90x900ts_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 touch screen driver!");
index 9175d49d25469d0eaa7794960580c14d7740b9a9..4bc851a9dc3d6cf821f1c8ec05f7abf37fb4c781 100644 (file)
@@ -401,18 +401,7 @@ static struct platform_driver wm831x_ts_driver = {
        .probe = wm831x_ts_probe,
        .remove = __devexit_p(wm831x_ts_remove),
 };
-
-static int __init wm831x_ts_init(void)
-{
-       return platform_driver_register(&wm831x_ts_driver);
-}
-module_init(wm831x_ts_init);
-
-static void __exit wm831x_ts_exit(void)
-{
-       platform_driver_unregister(&wm831x_ts_driver);
-}
-module_exit(wm831x_ts_exit);
+module_platform_driver(wm831x_ts_driver);
 
 /* Module information */
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
index 0a707bbbbea61eb7bcc32d276c298a28e99c5e74..bf0869a7a78e2754d38e0883b5105efac8a76452 100644 (file)
@@ -224,19 +224,7 @@ static struct platform_driver zylonite_wm97xx_driver = {
                .name   = "wm97xx-touch",
        },
 };
-
-static int __init zylonite_wm97xx_init(void)
-{
-       return platform_driver_register(&zylonite_wm97xx_driver);
-}
-
-static void __exit zylonite_wm97xx_exit(void)
-{
-       platform_driver_unregister(&zylonite_wm97xx_driver);
-}
-
-module_init(zylonite_wm97xx_init);
-module_exit(zylonite_wm97xx_exit);
+module_platform_driver(zylonite_wm97xx_driver);
 
 /* Module information */
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
index 5414253b185a3ada15daa7c4adc423d65dfe0285..6bea6962f8ee6d8d8f5a73c7f5309654725054e5 100644 (file)
@@ -34,7 +34,9 @@ config AMD_IOMMU
        bool "AMD IOMMU support"
        select SWIOTLB
        select PCI_MSI
-       select PCI_IOV
+       select PCI_ATS
+       select PCI_PRI
+       select PCI_PASID
        select IOMMU_API
        depends on X86_64 && PCI && ACPI
        ---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
          information to userspace via debugfs.
          If unsure, say N.
 
+config AMD_IOMMU_V2
+       tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+       depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+       select MMU_NOTIFIER
+       ---help---
+         This option enables support for the AMD IOMMUv2 features of the IOMMU
+         hardware. Select this option if you want to use devices that support
+         the the PCI PRI and PASID interface.
+
 # Intel IOMMU support
 config DMAR_TABLE
        bool
index 2f4448794bc793133d5f1e8a3145be4c1cc685b3..0e36b4934affc0a3b4a2f7debc8809babd719734 100644 (file)
@@ -1,6 +1,7 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
index 4ee277a8521a49eb41b5056daebf13cadc3d68dd..cce1f03b8895324d7d6e92cd6094cc39e7673cbe 100644 (file)
@@ -17,6 +17,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#include <linux/ratelimit.h>
 #include <linux/pci.h>
 #include <linux/pci-ats.h>
 #include <linux/bitmap.h>
@@ -28,6 +29,8 @@
 #include <linux/iommu.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
 #include <asm/msidef.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
 
 #define LOOP_TIMEOUT   100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES      (~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -59,6 +80,9 @@ static struct protection_domain *pt_domain;
 
 static struct iommu_ops amd_iommu_ops;
 
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -67,6 +91,7 @@ struct iommu_cmd {
 };
 
 static void update_domain(struct protection_domain *domain);
+static int __init alloc_passthrough_domain(void);
 
 /****************************************************************************
  *
@@ -147,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
        return dev->archdata.iommu;
 }
 
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+       static const int caps[] = {
+               PCI_EXT_CAP_ID_ATS,
+               PCI_EXT_CAP_ID_PRI,
+               PCI_EXT_CAP_ID_PASID,
+       };
+       int i, pos;
+
+       for (i = 0; i < 3; ++i) {
+               pos = pci_find_ext_capability(pdev, caps[i]);
+               if (pos == 0)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+       struct iommu_dev_data *dev_data;
+
+       dev_data = get_dev_data(&pdev->dev);
+
+       return dev_data->errata & (1 << erratum) ? true : false;
+}
+
 /*
  * In this function the list of preallocated protection domains is traversed to
  * find the domain for a specific device
@@ -204,6 +256,7 @@ static bool check_device(struct device *dev)
 
 static int iommu_init_device(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct iommu_dev_data *dev_data;
        u16 alias;
 
@@ -228,6 +281,13 @@ static int iommu_init_device(struct device *dev)
                dev_data->alias_data = alias_data;
        }
 
+       if (pci_iommuv2_capable(pdev)) {
+               struct amd_iommu *iommu;
+
+               iommu              = amd_iommu_rlookup_table[dev_data->devid];
+               dev_data->iommu_v2 = iommu->is_iommu_v2;
+       }
+
        dev->archdata.iommu = dev_data;
 
        return 0;
@@ -317,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single);
 DECLARE_STATS_COUNTER(domain_flush_all);
 DECLARE_STATS_COUNTER(alloced_io_mem);
 DECLARE_STATS_COUNTER(total_map_requests);
+DECLARE_STATS_COUNTER(complete_ppr);
+DECLARE_STATS_COUNTER(invalidate_iotlb);
+DECLARE_STATS_COUNTER(invalidate_iotlb_all);
+DECLARE_STATS_COUNTER(pri_requests);
+
 
 static struct dentry *stats_dir;
 static struct dentry *de_fflush;
@@ -351,6 +416,10 @@ static void amd_iommu_stats_init(void)
        amd_iommu_stats_add(&domain_flush_all);
        amd_iommu_stats_add(&alloced_io_mem);
        amd_iommu_stats_add(&total_map_requests);
+       amd_iommu_stats_add(&complete_ppr);
+       amd_iommu_stats_add(&invalidate_iotlb);
+       amd_iommu_stats_add(&invalidate_iotlb_all);
+       amd_iommu_stats_add(&pri_requests);
 }
 
 #endif
@@ -365,8 +434,8 @@ static void dump_dte_entry(u16 devid)
 {
        int i;
 
-       for (i = 0; i < 8; ++i)
-               pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+       for (i = 0; i < 4; ++i)
+               pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
                        amd_iommu_dev_table[devid].data[i]);
 }
 
@@ -461,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+{
+       struct amd_iommu_fault fault;
+       volatile u64 *raw;
+       int i;
+
+       INC_STATS_COUNTER(pri_requests);
+
+       raw = (u64 *)(iommu->ppr_log + head);
+
+       /*
+        * Hardware bug: Interrupt may arrive before the entry is written to
+        * memory. If this happens we need to wait for the entry to arrive.
+        */
+       for (i = 0; i < LOOP_TIMEOUT; ++i) {
+               if (PPR_REQ_TYPE(raw[0]) != 0)
+                       break;
+               udelay(1);
+       }
+
+       if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+               pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+               return;
+       }
+
+       fault.address   = raw[1];
+       fault.pasid     = PPR_PASID(raw[0]);
+       fault.device_id = PPR_DEVID(raw[0]);
+       fault.tag       = PPR_TAG(raw[0]);
+       fault.flags     = PPR_FLAGS(raw[0]);
+
+       /*
+        * To detect the hardware bug we need to clear the entry
+        * to back to zero.
+        */
+       raw[0] = raw[1] = 0;
+
+       atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+       unsigned long flags;
+       u32 head, tail;
+
+       if (iommu->ppr_log == NULL)
+               return;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+       tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+       while (head != tail) {
+
+               /* Handle PPR entry */
+               iommu_handle_ppr_entry(iommu, head);
+
+               /* Update and refresh ring-buffer state*/
+               head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+               writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+               tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+       }
+
+       /* enable ppr interrupts again */
+       writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+       spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
        struct amd_iommu *iommu;
 
-       for_each_iommu(iommu)
+       for_each_iommu(iommu) {
                iommu_poll_events(iommu);
+               iommu_poll_ppr_log(iommu);
+       }
 
        return IRQ_HANDLED;
 }
@@ -595,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 }
 
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+                                 u64 address, bool size)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       address &= ~(0xfffULL);
+
+       cmd->data[0]  = pasid & PASID_MASK;
+       cmd->data[1]  = domid;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[3]  = upper_32_bits(address);
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       if (size)
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+                                 int qdep, u64 address, bool size)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       address &= ~(0xfffULL);
+
+       cmd->data[0]  = devid;
+       cmd->data[0] |= (pasid & 0xff) << 16;
+       cmd->data[0] |= (qdep  & 0xff) << 24;
+       cmd->data[1]  = devid;
+       cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+       cmd->data[2]  = lower_32_bits(address);
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       cmd->data[3]  = upper_32_bits(address);
+       if (size)
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+                              int status, int tag, bool gn)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       cmd->data[0]  = devid;
+       if (gn) {
+               cmd->data[1]  = pasid & PASID_MASK;
+               cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
+       }
+       cmd->data[3]  = tag & 0x1ff;
+       cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+       CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
 static void build_inv_all(struct iommu_cmd *cmd)
 {
        memset(cmd, 0, sizeof(*cmd));
@@ -1496,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain)
        domain->pt_root = NULL;
 }
 
+/*
+ * Free every page referenced by a valid entry of a level-1 GCR3 table.
+ * The table page itself is freed by the caller.
+ */
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+       u64 *ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (!(tbl[i] & GCR3_VALID))
+                       continue;
+
+               ptr = __va(tbl[i] & PAGE_MASK);
+
+               free_page((unsigned long)ptr);
+       }
+}
+
+/*
+ * Free a level-2 GCR3 table: for every valid entry free the children
+ * of the referenced level-1 table and then the level-1 table page
+ * itself.
+ */
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+       u64 *ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (!(tbl[i] & GCR3_VALID))
+                       continue;
+
+               ptr = __va(tbl[i] & PAGE_MASK);
+
+               free_gcr3_tbl_level1(ptr);
+
+               /*
+                * The level-1 table page was allocated in
+                * __get_gcr3_pte() and is freed nowhere else - drop it
+                * here or it leaks when a 3-level table is torn down.
+                */
+               free_page((unsigned long)ptr);
+       }
+}
+
+/*
+ * Tear down the whole GCR3 table of a domain.  domain->glx encodes the
+ * number of table levels (0..2); any other value is a programming
+ * error.  The root page is freed last.
+ */
+static void free_gcr3_table(struct protection_domain *domain)
+{
+       if (domain->glx == 2)
+               free_gcr3_tbl_level2(domain->gcr3_tbl);
+       else if (domain->glx == 1)
+               free_gcr3_tbl_level1(domain->gcr3_tbl);
+       else if (domain->glx != 0)
+               BUG();
+
+       free_page((unsigned long)domain->gcr3_tbl);
+}
+
 /*
  * Free a domain, only used if something went wrong in the
  * allocation path and we need to free an already allocated page table
@@ -1582,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain)
 
 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 {
-       u64 pte_root = virt_to_phys(domain->pt_root);
-       u32 flags = 0;
+       u64 pte_root = 0;
+       u64 flags = 0;
+
+       if (domain->mode != PAGE_MODE_NONE)
+               pte_root = virt_to_phys(domain->pt_root);
 
        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
+       flags = amd_iommu_dev_table[devid].data[1];
+
        if (ats)
                flags |= DTE_FLAG_IOTLB;
 
-       amd_iommu_dev_table[devid].data[3] |= flags;
-       amd_iommu_dev_table[devid].data[2]  = domain->id;
-       amd_iommu_dev_table[devid].data[1]  = upper_32_bits(pte_root);
-       amd_iommu_dev_table[devid].data[0]  = lower_32_bits(pte_root);
+       if (domain->flags & PD_IOMMUV2_MASK) {
+               u64 gcr3 = __pa(domain->gcr3_tbl);
+               u64 glx  = domain->glx;
+               u64 tmp;
+
+               pte_root |= DTE_FLAG_GV;
+               pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+               /* First mask out possible old values for GCR3 table */
+               tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+               flags    &= ~tmp;
+
+               tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+               flags    &= ~tmp;
+
+               /* Encode GCR3 table into DTE */
+               tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+               pte_root |= tmp;
+
+               tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+               flags    |= tmp;
+
+               tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+               flags    |= tmp;
+       }
+
+       flags &= ~(0xffffUL);
+       flags |= domain->id;
+
+       amd_iommu_dev_table[devid].data[1]  = flags;
+       amd_iommu_dev_table[devid].data[0]  = pte_root;
 }
 
 static void clear_dte_entry(u16 devid)
@@ -1603,7 +1872,6 @@ static void clear_dte_entry(u16 devid)
        /* remove entry from the device table seen by the hardware */
        amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
        amd_iommu_dev_table[devid].data[1] = 0;
-       amd_iommu_dev_table[devid].data[2] = 0;
 
        amd_iommu_apply_erratum_63(devid);
 }
@@ -1696,6 +1964,93 @@ out_unlock:
        return ret;
 }
 
+
+/* Disable the IOMMUv2-related PCI features (ATS, PRI, PASID) again */
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+       pci_disable_ats(pdev);
+       pci_disable_pri(pdev);
+       pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+       u16 control;
+       int pos;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+       if (!pos)
+               return -EINVAL;
+
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       control |= PCI_PRI_CTRL_RESET;
+       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+       return 0;
+}
+
+/*
+ * Enable the PCI features required for IOMMUv2 operation on @pdev:
+ * PASID, PRI (after a reset) and finally ATS.  Device errata adjust
+ * the number of outstanding page requests and whether an extra PRI
+ * reset while enabled is needed.  On failure everything enabled so
+ * far is rolled back (ATS enable is last, so only PRI/PASID need
+ * disabling in the error path).
+ */
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+       bool reset_enable;
+       int reqs, ret;
+
+       /* FIXME: Hardcode number of outstanding requests for now */
+       reqs = 32;
+       if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+               reqs = 1;
+       reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+       /* Only allow access to user-accessible pages */
+       ret = pci_enable_pasid(pdev, 0);
+       if (ret)
+               goto out_err;
+
+       /* First reset the PRI state of the device */
+       ret = pci_reset_pri(pdev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PRI */
+       ret = pci_enable_pri(pdev, reqs);
+       if (ret)
+               goto out_err;
+
+       if (reset_enable) {
+               ret = pri_reset_while_enabled(pdev);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = pci_enable_ats(pdev, PAGE_SHIFT);
+       if (ret)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       pci_disable_pri(pdev);
+       pci_disable_pasid(pdev);
+
+       return ret;
+}
+
+/* FIXME: Move this to PCI code */
+#define PCI_PRI_TLP_OFF                (1 << 2)
+
+/*
+ * Return true when the device requires PPR completions to carry the
+ * PASID TLP prefix (bit 2 of the PRI control register).  This is a
+ * file-local helper, so give it internal linkage instead of exporting
+ * a symbol in the global pci_* namespace.
+ */
+static bool pci_pri_tlp_required(struct pci_dev *pdev)
+{
+       u16 control;
+       int pos;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+       if (!pos)
+               return false;
+
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+
+       return (control & PCI_PRI_TLP_OFF) ? true : false;
+}
+
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
@@ -1710,7 +2065,18 @@ static int attach_device(struct device *dev,
 
        dev_data = get_dev_data(dev);
 
-       if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+       if (domain->flags & PD_IOMMUV2_MASK) {
+               if (!dev_data->iommu_v2 || !dev_data->passthrough)
+                       return -EINVAL;
+
+               if (pdev_iommuv2_enable(pdev) != 0)
+                       return -EINVAL;
+
+               dev_data->ats.enabled = true;
+               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+               dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+       } else if (amd_iommu_iotlb_sup &&
+                  pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }
@@ -1760,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data)
         * passthrough domain if it is detached from any other domain.
         * Make sure we can deassign from the pt_domain itself.
         */
-       if (iommu_pass_through &&
+       if (dev_data->passthrough &&
            (dev_data->domain == NULL && domain != pt_domain))
                __attach_device(dev_data, pt_domain);
 }
@@ -1770,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
  */
 static void detach_device(struct device *dev)
 {
+       struct protection_domain *domain;
        struct iommu_dev_data *dev_data;
        unsigned long flags;
 
        dev_data = get_dev_data(dev);
+       domain   = dev_data->domain;
 
        /* lock device table */
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        __detach_device(dev_data);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-       if (dev_data->ats.enabled) {
+       if (domain->flags & PD_IOMMUV2_MASK)
+               pdev_iommuv2_disable(to_pci_dev(dev));
+       else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));
-               dev_data->ats.enabled = false;
-       }
+
+       dev_data->ats.enabled = false;
 }
 
 /*
@@ -1818,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev)
 static int device_change_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
 {
-       struct device *dev = data;
-       u16 devid;
-       struct protection_domain *domain;
        struct dma_ops_domain *dma_domain;
+       struct protection_domain *domain;
+       struct iommu_dev_data *dev_data;
+       struct device *dev = data;
        struct amd_iommu *iommu;
        unsigned long flags;
+       u16 devid;
 
        if (!check_device(dev))
                return 0;
 
-       devid  = get_device_id(dev);
-       iommu  = amd_iommu_rlookup_table[devid];
+       devid    = get_device_id(dev);
+       iommu    = amd_iommu_rlookup_table[devid];
+       dev_data = get_dev_data(dev);
 
        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -1838,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb,
 
                if (!domain)
                        goto out;
-               if (iommu_pass_through)
+               if (dev_data->passthrough)
                        break;
                detach_device(dev);
                break;
@@ -2434,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  */
 static void prealloc_protection_domains(void)
 {
-       struct pci_dev *dev = NULL;
+       struct iommu_dev_data *dev_data;
        struct dma_ops_domain *dma_dom;
+       struct pci_dev *dev = NULL;
        u16 devid;
 
        for_each_pci_dev(dev) {
@@ -2444,6 +2817,16 @@ static void prealloc_protection_domains(void)
                if (!check_device(&dev->dev))
                        continue;
 
+               dev_data = get_dev_data(&dev->dev);
+               if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
+                       /* Make sure passthrough domain is allocated */
+                       alloc_passthrough_domain();
+                       dev_data->passthrough = true;
+                       attach_device(&dev->dev, pt_domain);
+                       pr_info("AMD-Vi: Using passthough domain for device %s\n",
+                               dev_name(&dev->dev));
+               }
+
                /* Is there already any domain for it? */
                if (domain_for_device(&dev->dev))
                        continue;
@@ -2474,6 +2857,7 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 
 static unsigned device_dma_ops_init(void)
 {
+       struct iommu_dev_data *dev_data;
        struct pci_dev *pdev = NULL;
        unsigned unhandled = 0;
 
@@ -2483,7 +2867,12 @@ static unsigned device_dma_ops_init(void)
                        continue;
                }
 
-               pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+               dev_data = get_dev_data(&pdev->dev);
+
+               if (!dev_data->passthrough)
+                       pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+               else
+                       pdev->dev.archdata.dma_ops = &nommu_dma_ops;
        }
 
        return unhandled;
@@ -2610,6 +2999,20 @@ out_err:
        return NULL;
 }
 
+/*
+ * Allocate the global passthrough domain (pt_domain) on first use.
+ * Idempotent: returns 0 immediately when it already exists, -ENOMEM
+ * on allocation failure.  PAGE_MODE_NONE means no page table - the
+ * IOMMU passes DMA through untranslated.
+ */
+static int __init alloc_passthrough_domain(void)
+{
+       if (pt_domain != NULL)
+               return 0;
+
+       /* allocate passthrough domain */
+       pt_domain = protection_domain_alloc();
+       if (!pt_domain)
+               return -ENOMEM;
+
+       pt_domain->mode = PAGE_MODE_NONE;
+
+       return 0;
+}
 static int amd_iommu_domain_init(struct iommu_domain *dom)
 {
        struct protection_domain *domain;
@@ -2623,6 +3026,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
        if (!domain->pt_root)
                goto out_free;
 
+       domain->iommu_domain = dom;
+
        dom->priv = domain;
 
        return 0;
@@ -2645,7 +3050,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
        BUG_ON(domain->dev_cnt != 0);
 
-       free_pagetable(domain);
+       if (domain->mode != PAGE_MODE_NONE)
+               free_pagetable(domain);
+
+       if (domain->flags & PD_IOMMUV2_MASK)
+               free_gcr3_table(domain);
 
        protection_domain_free(domain);
 
@@ -2702,13 +3111,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-                        phys_addr_t paddr, int gfp_order, int iommu_prot)
+                        phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-       unsigned long page_size = 0x1000UL << gfp_order;
        struct protection_domain *domain = dom->priv;
        int prot = 0;
        int ret;
 
+       if (domain->mode == PAGE_MODE_NONE)
+               return -EINVAL;
+
        if (iommu_prot & IOMMU_READ)
                prot |= IOMMU_PROT_IR;
        if (iommu_prot & IOMMU_WRITE)
@@ -2721,13 +3132,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
        return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-                          int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+                          size_t page_size)
 {
        struct protection_domain *domain = dom->priv;
-       unsigned long page_size, unmap_size;
+       size_t unmap_size;
 
-       page_size  = 0x1000UL << gfp_order;
+       if (domain->mode == PAGE_MODE_NONE)
+               return -EINVAL;
 
        mutex_lock(&domain->api_lock);
        unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +3147,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 
        domain_flush_tlb_pde(domain);
 
-       return get_order(unmap_size);
+       return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2746,6 +3158,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
        phys_addr_t paddr;
        u64 *pte, __pte;
 
+       if (domain->mode == PAGE_MODE_NONE)
+               return iova;
+
        pte = fetch_pte(domain, iova);
 
        if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2773,6 +3188,26 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
        return 0;
 }
 
+/*
+ * iommu_ops->device_group callback: report the isolation group of a
+ * device.  Virtual functions (or when multi-function grouping is off)
+ * are grouped by their own alias; otherwise all functions of a
+ * multi-function device share function 0's alias.
+ */
+static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+       struct iommu_dev_data *dev_data = dev->archdata.iommu;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 devid;
+
+       if (!dev_data)
+               return -ENODEV;
+
+       if (pdev->is_virtfn || !iommu_group_mf)
+               devid = dev_data->devid;
+       else
+               devid = calc_devid(pdev->bus->number,
+                                  PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+
+       *groupid = amd_iommu_alias_table[devid];
+
+       return 0;
+}
+
 static struct iommu_ops amd_iommu_ops = {
        .domain_init = amd_iommu_domain_init,
        .domain_destroy = amd_iommu_domain_destroy,
@@ -2782,6 +3217,8 @@ static struct iommu_ops amd_iommu_ops = {
        .unmap = amd_iommu_unmap,
        .iova_to_phys = amd_iommu_iova_to_phys,
        .domain_has_cap = amd_iommu_domain_has_cap,
+       .device_group = amd_iommu_device_group,
+       .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
@@ -2796,21 +3233,23 @@ static struct iommu_ops amd_iommu_ops = {
 
 int __init amd_iommu_init_passthrough(void)
 {
-       struct amd_iommu *iommu;
+       struct iommu_dev_data *dev_data;
        struct pci_dev *dev = NULL;
+       struct amd_iommu *iommu;
        u16 devid;
+       int ret;
 
-       /* allocate passthrough domain */
-       pt_domain = protection_domain_alloc();
-       if (!pt_domain)
-               return -ENOMEM;
-
-       pt_domain->mode |= PAGE_MODE_NONE;
+       ret = alloc_passthrough_domain();
+       if (ret)
+               return ret;
 
        for_each_pci_dev(dev) {
                if (!check_device(&dev->dev))
                        continue;
 
+               dev_data = get_dev_data(&dev->dev);
+               dev_data->passthrough = true;
+
                devid = get_device_id(&dev->dev);
 
                iommu = amd_iommu_rlookup_table[devid];
@@ -2820,7 +3259,375 @@ int __init amd_iommu_init_passthrough(void)
                attach_device(&dev->dev, pt_domain);
        }
 
+       amd_iommu_stats_init();
+
        pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
 
        return 0;
 }
+
+/* IOMMUv2 specific functions */
+/* Register a listener for incoming peripheral page requests (PPRs) */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+/* Remove a previously registered PPR notifier again */
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+/*
+ * Switch a domain into direct-mapped (no host page table) mode.  Used
+ * by the IOMMUv2 driver before enabling PASID support, where
+ * translation happens through the guest CR3 tables instead.  The host
+ * page table is freed once the hardware no longer references it.
+ */
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       /* Update data structure */
+       domain->mode    = PAGE_MODE_NONE;
+       domain->updated = true;
+
+       /* Make changes visible to IOMMUs */
+       update_domain(domain);
+
+       /* Page-table is not visible to IOMMU anymore, so free it */
+       free_pagetable(domain);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+/*
+ * Enable IOMMUv2 (PASID) support on a domain, sized for @pasids
+ * concurrent PASIDs.  Allocates the GCR3 root table and computes the
+ * number of table levels needed; fails with -EINVAL when the PASID
+ * count is out of range or exceeds what the hardware supports, and
+ * with -EBUSY when devices are already attached (avoids per-device
+ * IOMMUv2 capability re-checks).
+ */
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+       int levels, ret;
+
+       if (pasids <= 0 || pasids > (PASID_MASK + 1))
+               return -EINVAL;
+
+       /* Number of GCR3 table levels required */
+       for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+               levels += 1;
+
+       if (levels > amd_iommu_max_glx_val)
+               return -EINVAL;
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       /*
+        * Save us all sanity checks whether devices already in the
+        * domain support IOMMUv2. Just force that the domain has no
+        * devices attached when it is switched into IOMMUv2 mode.
+        */
+       ret = -EBUSY;
+       if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+               goto out;
+
+       ret = -ENOMEM;
+       /* GFP_ATOMIC: allocation happens under the domain spinlock */
+       domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+       if (domain->gcr3_tbl == NULL)
+               goto out;
+
+       domain->glx      = levels;
+       domain->flags   |= PD_IOMMUV2_MASK;
+       domain->updated  = true;
+
+       update_domain(domain);
+
+       ret = 0;
+
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
+
+/*
+ * Flush all TLB entries for one PASID of an IOMMUv2 domain: first the
+ * IOMMU TLBs of every IOMMU the domain uses, then the device TLBs of
+ * every attached device.  The ordering matters - a device TLB could
+ * otherwise be refilled from a stale IOMMU TLB entry.  Caller must
+ * hold domain->lock.
+ */
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+                        u64 address, bool size)
+{
+       struct iommu_dev_data *dev_data;
+       struct iommu_cmd cmd;
+       int i, ret;
+
+       if (!(domain->flags & PD_IOMMUV2_MASK))
+               return -EINVAL;
+
+       build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+       /*
+        * IOMMU TLB needs to be flushed before Device TLB to
+        * prevent device TLB refill from IOMMU TLB
+        */
+       for (i = 0; i < amd_iommus_present; ++i) {
+               if (domain->dev_iommu[i] == 0)
+                       continue;
+
+               ret = iommu_queue_command(amd_iommus[i], &cmd);
+               if (ret != 0)
+                       goto out;
+       }
+
+       /* Wait until IOMMU TLB flushes are complete */
+       domain_flush_complete(domain);
+
+       /* Now flush device TLBs */
+       list_for_each_entry(dev_data, &domain->dev_list, list) {
+               struct amd_iommu *iommu;
+               int qdep;
+
+               /* IOMMUv2 attachment enabled ATS (see attach_device()) */
+               BUG_ON(!dev_data->ats.enabled);
+
+               qdep  = dev_data->ats.qdep;
+               iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+               build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+                                     qdep, address, size);
+
+               ret = iommu_queue_command(iommu, &cmd);
+               if (ret != 0)
+                       goto out;
+       }
+
+       /* Wait until all device TLBs are flushed */
+       domain_flush_complete(domain);
+
+       ret = 0;
+
+out:
+
+       return ret;
+}
+
+/* Flush a single 4k page of one PASID; caller holds domain->lock */
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+                                 u64 address)
+{
+       INC_STATS_COUNTER(invalidate_iotlb);
+
+       return __flush_pasid(domain, pasid, address, false);
+}
+
+/* Locked wrapper around __amd_iommu_flush_page() for external callers */
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+                        u64 address)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __amd_iommu_flush_page(domain, pasid, address);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+/* Flush all pages of one PASID; caller holds domain->lock */
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+       INC_STATS_COUNTER(invalidate_iotlb_all);
+
+       return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                            true);
+}
+
+/* Locked wrapper around __amd_iommu_flush_tlb() for external callers */
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __amd_iommu_flush_tlb(domain, pasid);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+/*
+ * Walk the GCR3 table to the leaf entry for @pasid, starting at @root
+ * with @level remaining levels (9 PASID bits per level).  When @alloc
+ * is true missing intermediate tables are allocated (GFP_ATOMIC, as
+ * callers hold domain->lock); those pages are later released by
+ * free_gcr3_table().  Returns NULL when a level is missing and @alloc
+ * is false, or on allocation failure.
+ */
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+       int index;
+       u64 *pte;
+
+       while (true) {
+
+               index = (pasid >> (9 * level)) & 0x1ff;
+               pte   = &root[index];
+
+               if (level == 0)
+                       break;
+
+               if (!(*pte & GCR3_VALID)) {
+                       if (!alloc)
+                               return NULL;
+
+                       root = (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (root == NULL)
+                               return NULL;
+
+                       *pte = __pa(root) | GCR3_VALID;
+               }
+
+               root = __va(*pte & PAGE_MASK);
+
+               level -= 1;
+       }
+
+       return pte;
+}
+
+/*
+ * Install the guest CR3 for @pasid into the domain's GCR3 table and
+ * flush the TLBs for that PASID.  Only valid on direct-mapped
+ * (PAGE_MODE_NONE) domains.  Caller must hold domain->lock.
+ */
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+                     unsigned long cr3)
+{
+       u64 *pte;
+
+       if (domain->mode != PAGE_MODE_NONE)
+               return -EINVAL;
+
+       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+       if (pte == NULL)
+               return -ENOMEM;
+
+       *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+       return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+/*
+ * Remove the guest CR3 entry for @pasid and flush the TLBs.  A missing
+ * leaf entry is not an error - nothing was mapped, so return success.
+ * Caller must hold domain->lock.
+ */
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+       u64 *pte;
+
+       if (domain->mode != PAGE_MODE_NONE)
+               return -EINVAL;
+
+       pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+       if (pte == NULL)
+               return 0;
+
+       *pte = 0;
+
+       return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+/* Locked wrapper around __set_gcr3() for external callers */
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+                             unsigned long cr3)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __set_gcr3(domain, pasid, cr3);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+/* Locked wrapper around __clear_gcr3() for external callers */
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       ret = __clear_gcr3(domain, pasid);
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+/*
+ * Send a COMPLETE_PPR command for a handled page request of @pdev.
+ * The GN bit is set when the device needs the PASID TLP prefix on
+ * completions (dev_data->pri_tlp, probed at attach time).
+ */
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+                          int status, int tag)
+{
+       struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
+       struct iommu_cmd cmd;
+
+       INC_STATS_COUNTER(complete_ppr);
+
+       dev_data = get_dev_data(&pdev->dev);
+       iommu    = amd_iommu_rlookup_table[dev_data->devid];
+
+       build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+                          tag, dev_data->pri_tlp);
+
+       return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+/*
+ * Return the generic iommu_domain of the IOMMUv2-enabled domain @pdev
+ * is attached to, or NULL when the device has no domain or its domain
+ * is not in IOMMUv2 mode.
+ */
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+       struct protection_domain *domain;
+
+       domain = get_domain(&pdev->dev);
+       if (IS_ERR(domain))
+               return NULL;
+
+       /* Only return IOMMUv2 domains */
+       if (!(domain->flags & PD_IOMMUV2_MASK))
+               return NULL;
+
+       return domain->iommu_domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+/*
+ * Mark a device-specific erratum (AMD_PRI_DEV_ERRATUM_*) on @pdev so
+ * later IOMMUv2 enablement can apply the matching workaround.  No-op
+ * when IOMMUv2 is not supported by the hardware.
+ */
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+       struct iommu_dev_data *dev_data;
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       dev_data = get_dev_data(&pdev->dev);
+       dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
+
+/*
+ * Fill @info with the IOMMUv2-relevant capabilities of @pdev: ATS,
+ * PRI and PASID support flags, the usable PASID count (limited by both
+ * the device and the IOMMU's GLX capability) and PASID exec/priv
+ * feature bits.  Returns -EINVAL on NULL arguments or when IOMMUv2 is
+ * not supported by the hardware.
+ */
+int amd_iommu_device_info(struct pci_dev *pdev,
+                          struct amd_iommu_device_info *info)
+{
+       int max_pasids;
+       int pos;
+
+       if (pdev == NULL || info == NULL)
+               return -EINVAL;
+
+       if (!amd_iommu_v2_supported())
+               return -EINVAL;
+
+       memset(info, 0, sizeof(*info));
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+       if (pos)
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+       if (pos)
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+       if (pos) {
+               int features;
+
+               /* 9 PASID bits per GCR3 level, capped at the 20-bit max */
+               max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+               max_pasids = min(max_pasids, (1 << 20));
+
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+               info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+               features = pci_pasid_features(pdev);
+               if (features & PCI_PASID_CAP_EXEC)
+                       info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+               if (features & PCI_PASID_CAP_PRIV)
+                       info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
index 82d2410f4205d8e5977fce0a9a6ba59ad01b7088..bdea288dc185c619e7e944a2959ed215e2a22c10 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
+#include <linux/export.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
 /*
  * The ACPI table parsing functions set this variable on an error
  */
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
+/*
+ * Program the invalidation timeout field (CTRL_INV_TO) of the IOMMU
+ * control register.  @timeout is one of the CTRL_INV_TO_* encodings.
+ */
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+       u32 ctrl;
+
+       ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+       ctrl &= ~CTRL_INV_TO_MASK;
+       ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+       writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
+/*
+ * Allocates the buffer where the IOMMU will log peripheral page
+ * requests (PPR log).  Returns the buffer or NULL on failure.
+ */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+       iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                               get_order(PPR_LOG_SIZE));
+
+       if (iommu->ppr_log == NULL)
+               return NULL;
+
+       return iommu->ppr_log;
+}
+
+/*
+ * Tell the hardware where the PPR log lives and enable PPR logging.
+ * No-op when no PPR log was allocated (hardware lacks the feature).
+ */
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+       u64 entry;
+
+       if (iommu->ppr_log == NULL)
+               return;
+
+       entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+       memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+                   &entry, sizeof(entry));
+
+       /* set head and tail to zero manually */
+       writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+       iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+       iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+/* Free the PPR log buffer again; safe to call when none was allocated */
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+       if (iommu->ppr_log == NULL)
+               return;
+
+       free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+/* Enable guest translation (GT) if the hardware supports it */
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+       if (!iommu_feature(iommu, FEATURE_GT))
+               return;
+
+       iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
 /* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
-       int i = (bit >> 5) & 0x07;
-       int _bit = bit & 0x1f;
+       int i = (bit >> 6) & 0x03;
+       int _bit = bit & 0x3f;
 
-       amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+       amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 }
 
 static int get_dev_entry_bit(u16 devid, u8 bit)
 {
-       int i = (bit >> 5) & 0x07;
-       int _bit = bit & 0x1f;
+       int i = (bit >> 6) & 0x03;
+       int _bit = bit & 0x3f;
 
-       return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+       return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 }
 
 
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 
        iommu->features = ((u64)high << 32) | low;
 
+       if (iommu_feature(iommu, FEATURE_GT)) {
+               int glxval;
+               u32 pasids;
+               u64 shift;
+
+               shift   = iommu->features & FEATURE_PASID_MASK;
+               shift >>= FEATURE_PASID_SHIFT;
+               pasids  = (1 << shift);
+
+               amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+               glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+               glxval >>= FEATURE_GLXVAL_SHIFT;
+
+               if (amd_iommu_max_glx_val == -1)
+                       amd_iommu_max_glx_val = glxval;
+               else
+                       amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+       }
+
+       if (iommu_feature(iommu, FEATURE_GT) &&
+           iommu_feature(iommu, FEATURE_PPR)) {
+               iommu->is_iommu_v2   = true;
+               amd_iommu_v2_present = true;
+       }
+
        if (!is_rd890_iommu(iommu->dev))
                return;
 
@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
 {
        free_command_buffer(iommu);
        free_event_buffer(iommu);
+       free_ppr_log(iommu);
        iommu_unmap_mmio_space(iommu);
 }
 
@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);
 
+       if (iommu_feature(iommu, FEATURE_PPR)) {
+               iommu->ppr_log = alloc_ppr_log(iommu);
+               if (!iommu->ppr_log)
+                       return -ENOMEM;
+       }
+
        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;
 
@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
        iommu->int_enabled = true;
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
+       if (iommu->ppr_log != NULL)
+               iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
        return 0;
 }
 
@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+       /* Set IOTLB invalidation timeout to 1s */
+       iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
 }
 
 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
+               iommu_enable_ppr_log(iommu);
+               iommu_enable_gt(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)
 
        /* re-load the hardware */
        enable_iommus();
-
-       /*
-        * we have to flush after the IOMMUs are enabled because a
-        * disabled IOMMU will never execute the commands we send
-        */
-       for_each_iommu(iommu)
-               iommu_flush_all_caches(iommu);
 }
 
 static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
+               if (strncmp(str, "force_isolation", 15) == 0)
+                       amd_iommu_force_isolation = true;
        }
 
        return 1;
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  0,
                  0);
+
+bool amd_iommu_v2_supported(void)
+{
+       return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
index 7ffaa64410b0c9cda6a59f4fece123413d0e0118..1a7f41c6cc66b4eff17092ef62d46a41f93d676d 100644 (file)
@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
 extern void amd_iommu_init_notifier(void);
 extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+                               u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+                                    unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS                    0x0
+#define PPR_INVALID                    0x1
+#define PPR_FAILURE                    0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+                                 int status, int tag);
+
 #ifndef CONFIG_AMD_IOMMU_STATS
 
 static inline void amd_iommu_stats_init(void) { }
index 5b9c5075e81a67bd11938d228b674628aa61933b..2452f3b7173619c449c04f025e2e2ff73cc44f29 100644 (file)
 #define MMIO_EXCL_BASE_OFFSET   0x0020
 #define MMIO_EXCL_LIMIT_OFFSET  0x0028
 #define MMIO_EXT_FEATURES      0x0030
+#define MMIO_PPR_LOG_OFFSET    0x0038
 #define MMIO_CMD_HEAD_OFFSET   0x2000
 #define MMIO_CMD_TAIL_OFFSET   0x2008
 #define MMIO_EVT_HEAD_OFFSET   0x2010
 #define MMIO_EVT_TAIL_OFFSET   0x2018
 #define MMIO_STATUS_OFFSET     0x2020
+#define MMIO_PPR_HEAD_OFFSET   0x2030
+#define MMIO_PPR_TAIL_OFFSET   0x2038
 
 
 /* Extended Feature Bits */
 #define FEATURE_HE             (1ULL<<8)
 #define FEATURE_PC             (1ULL<<9)
 
+#define FEATURE_PASID_SHIFT    32
+#define FEATURE_PASID_MASK     (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT   14
+#define FEATURE_GLXVAL_MASK    (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK             0x000fffff
+
 /* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK  0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK  (1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK       (1 << 6)
 
 /* event logging constants */
 #define EVENT_ENTRY_SIZE       0x10
 #define CONTROL_EVT_LOG_EN      0x02ULL
 #define CONTROL_EVT_INT_EN      0x03ULL
 #define CONTROL_COMWAIT_EN      0x04ULL
+#define CONTROL_INV_TIMEOUT    0x05ULL
 #define CONTROL_PASSPW_EN       0x08ULL
 #define CONTROL_RESPASSPW_EN    0x09ULL
 #define CONTROL_COHERENT_EN     0x0aULL
 #define CONTROL_CMDBUF_EN       0x0cULL
 #define CONTROL_PPFLOG_EN       0x0dULL
 #define CONTROL_PPFINT_EN       0x0eULL
+#define CONTROL_PPR_EN          0x0fULL
+#define CONTROL_GT_EN           0x10ULL
+
+#define CTRL_INV_TO_MASK       (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE       0
+#define CTRL_INV_TO_1MS                1
+#define CTRL_INV_TO_10MS       2
+#define CTRL_INV_TO_100MS      3
+#define CTRL_INV_TO_1S         4
+#define CTRL_INV_TO_10S                5
+#define CTRL_INV_TO_100S       6
 
 /* command specific defines */
 #define CMD_COMPL_WAIT          0x01
 #define CMD_INV_DEV_ENTRY       0x02
 #define CMD_INV_IOMMU_PAGES    0x03
 #define CMD_INV_IOTLB_PAGES    0x04
+#define CMD_COMPLETE_PPR       0x07
 #define CMD_INV_ALL            0x08
 
 #define CMD_COMPL_WAIT_STORE_MASK      0x01
 #define CMD_COMPL_WAIT_INT_MASK                0x02
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK  0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK   0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK    0x04
+
+#define PPR_STATUS_MASK                        0xf
+#define PPR_STATUS_SHIFT               12
 
 #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS        0x7fffffffffffffffULL
 
 #define EVT_BUFFER_SIZE                8192 /* 512 entries */
 #define EVT_LEN_MASK           (0x9ULL << 56)
 
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES                512
+#define PPR_LOG_SIZE_SHIFT     56
+#define PPR_LOG_SIZE_512       (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE         16
+#define PPR_LOG_SIZE           (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x)                (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x)           (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x)           ((x) & 0xffffULL)
+#define PPR_TAG(x)             (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x)          (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x)          (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x)           ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT          0x01
+
 #define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
-#define DTE_FLAG_IOTLB 0x01
+#define DTE_FLAG_IOTLB (0x01UL << 32)
+#define DTE_FLAG_GV    (0x01ULL << 55)
+#define DTE_GLX_SHIFT  (56)
+#define DTE_GLX_MASK   (3)
+
+#define DTE_GCR3_VAL_A(x)      (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x)      (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x)      (((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A       0
+#define DTE_GCR3_INDEX_B       1
+#define DTE_GCR3_INDEX_C       1
+
+#define DTE_GCR3_SHIFT_A       58
+#define DTE_GCR3_SHIFT_B       16
+#define DTE_GCR3_SHIFT_C       43
+
+#define GCR3_VALID             0x01ULL
 
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
                                              domain for an IOMMU */
 #define PD_PASSTHROUGH_MASK    (1UL << 2) /* domain has no page
                                              translation */
+#define PD_IOMMUV2_MASK                (1UL << 3) /* domain has gcr3 table */
 
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)                                    \
@@ -285,6 +349,29 @@ extern bool amd_iommu_iotlb_sup;
 #define APERTURE_RANGE_INDEX(a)        ((a) >> APERTURE_RANGE_SHIFT)
 #define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
 
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+       u64 address;    /* IO virtual address of the fault*/
+       u32 pasid;      /* Address space identifier */
+       u16 device_id;  /* Originating PCI device id */
+       u16 tag;        /* PPR tag */
+       u16 flags;      /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC (1 << 1)
+#define PPR_FAULT_READ  (1 << 2)
+#define PPR_FAULT_WRITE (1 << 5)
+#define PPR_FAULT_USER  (1 << 6)
+#define PPR_FAULT_RSVD  (1 << 7)
+#define PPR_FAULT_GN    (1 << 8)
+
+struct iommu_domain;
+
 /*
  * This structure contains generic data for  IOMMU protection domains
  * independent of their use.
@@ -297,11 +384,15 @@ struct protection_domain {
        u16 id;                 /* the domain id written to the device table */
        int mode;               /* paging mode (0-6 levels) */
        u64 *pt_root;           /* page table root pointer */
+       int glx;                /* Number of levels for GCR3 table */
+       u64 *gcr3_tbl;          /* Guest CR3 table */
        unsigned long flags;    /* flags to find out type of domain */
        bool updated;           /* complete domain flush required */
        unsigned dev_cnt;       /* devices assigned to this domain */
        unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
        void *priv;             /* private data */
+       struct iommu_domain *iommu_domain; /* Pointer to generic
+                                             domain structure */
 
 };
 
@@ -315,10 +406,15 @@ struct iommu_dev_data {
        struct protection_domain *domain; /* Domain the device is bound to */
        atomic_t bind;                    /* Domain attach reverent count */
        u16 devid;                        /* PCI Device ID */
+       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
+       bool passthrough;                 /* Default for device is pt_domain */
        struct {
                bool enabled;
                int qdep;
        } ats;                            /* ATS state */
+       bool pri_tlp;                     /* PASID TLB required for
+                                            PPR completions */
+       u32 errata;                       /* Bitmap for errata to apply */
 };
 
 /*
@@ -399,6 +495,9 @@ struct amd_iommu {
        /* Extended features */
        u64 features;
 
+       /* IOMMUv2 */
+       bool is_iommu_v2;
+
        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -431,6 +530,9 @@ struct amd_iommu {
        /* MSI number for event interrupt */
        u16 evt_msi_num;
 
+       /* Base of the PPR log, if present */
+       u8 *ppr_log;
+
        /* true if interrupts for this IOMMU are already enabled */
        bool int_enabled;
 
@@ -484,7 +586,7 @@ extern struct list_head amd_iommu_pd_list;
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
-       u32 data[8];
+       u64 data[4];
 };
 
 /*
@@ -549,6 +651,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  */
 extern bool amd_iommu_unmap_flush;
 
+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
new file mode 100644 (file)
index 0000000..8add9f1
--- /dev/null
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu_proto.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+
+#define MAX_DEVICES            0x10000
+#define PRI_QUEUE_SIZE         512
+
+struct pri_queue {
+       atomic_t inflight;
+       bool finish;
+       int status;
+};
+
+struct pasid_state {
+       struct list_head list;                  /* For global state-list */
+       atomic_t count;                         /* Reference count */
+       struct task_struct *task;               /* Task bound to this PASID */
+       struct mm_struct *mm;                   /* mm_struct for the faults */
+       struct mmu_notifier mn;                 /* mmu_otifier handle */
+       struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
+       struct device_state *device_state;      /* Link to our device_state */
+       int pasid;                              /* PASID index */
+       spinlock_t lock;                        /* Protect pri_queues */
+       wait_queue_head_t wq;                   /* To wait for count == 0 */
+};
+
+struct device_state {
+       atomic_t count;
+       struct pci_dev *pdev;
+       struct pasid_state **states;
+       struct iommu_domain *domain;
+       int pasid_levels;
+       int max_pasids;
+       amd_iommu_invalid_ppr_cb inv_ppr_cb;
+       amd_iommu_invalidate_ctx inv_ctx_cb;
+       spinlock_t lock;
+       wait_queue_head_t wq;
+};
+
+struct fault {
+       struct work_struct work;
+       struct device_state *dev_state;
+       struct pasid_state *state;
+       struct mm_struct *mm;
+       u64 address;
+       u16 devid;
+       u16 pasid;
+       u16 tag;
+       u16 finish;
+       u16 flags;
+};
+
+struct device_state **state_table;
+static spinlock_t state_lock;
+
+/* List and lock for all pasid_states */
+static LIST_HEAD(pasid_state_list);
+static DEFINE_SPINLOCK(ps_lock);
+
+static struct workqueue_struct *iommu_wq;
+
+/*
+ * Empty page table - Used between
+ * mmu_notifier_invalidate_range_start and
+ * mmu_notifier_invalidate_range_end
+ */
+static u64 *empty_page_table;
+
+static void free_pasid_states(struct device_state *dev_state);
+static void unbind_pasid(struct device_state *dev_state, int pasid);
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
+
+static u16 device_id(struct pci_dev *pdev)
+{
+       u16 devid;
+
+       devid = pdev->bus->number;
+       devid = (devid << 8) | pdev->devfn;
+
+       return devid;
+}
+
+static struct device_state *get_device_state(u16 devid)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state_lock, flags);
+       dev_state = state_table[devid];
+       if (dev_state != NULL)
+               atomic_inc(&dev_state->count);
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+       /*
+        * First detach device from domain - No more PRI requests will arrive
+        * from that device after it is unbound from the IOMMUv2 domain.
+        */
+       iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+
+       /* Everything is down now, free the IOMMUv2 domain */
+       iommu_domain_free(dev_state->domain);
+
+       /* Finally get rid of the device-state */
+       kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+       if (atomic_dec_and_test(&dev_state->count))
+               wake_up(&dev_state->wq);
+}
+
+static void put_device_state_wait(struct device_state *dev_state)
+{
+       DEFINE_WAIT(wait);
+
+       prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+       if (!atomic_dec_and_test(&dev_state->count))
+               schedule();
+       finish_wait(&dev_state->wq, &wait);
+
+       free_device_state(dev_state);
+}
+
+static struct notifier_block profile_nb = {
+       .notifier_call = task_exit,
+};
+
+static void link_pasid_state(struct pasid_state *pasid_state)
+{
+       spin_lock(&ps_lock);
+       list_add_tail(&pasid_state->list, &pasid_state_list);
+       spin_unlock(&ps_lock);
+}
+
+static void __unlink_pasid_state(struct pasid_state *pasid_state)
+{
+       list_del(&pasid_state->list);
+}
+
+static void unlink_pasid_state(struct pasid_state *pasid_state)
+{
+       spin_lock(&ps_lock);
+       __unlink_pasid_state(pasid_state);
+       spin_unlock(&ps_lock);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+                                                 int pasid, bool alloc)
+{
+       struct pasid_state **root, **ptr;
+       int level, index;
+
+       level = dev_state->pasid_levels;
+       root  = dev_state->states;
+
+       while (true) {
+
+               index = (pasid >> (9 * level)) & 0x1ff;
+               ptr   = &root[index];
+
+               if (level == 0)
+                       break;
+
+               if (*ptr == NULL) {
+                       if (!alloc)
+                               return NULL;
+
+                       *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+                       if (*ptr == NULL)
+                               return NULL;
+               }
+
+               root   = (struct pasid_state **)*ptr;
+               level -= 1;
+       }
+
+       return ptr;
+}
+
+static int set_pasid_state(struct device_state *dev_state,
+                          struct pasid_state *pasid_state,
+                          int pasid)
+{
+       struct pasid_state **ptr;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+       ret = -ENOMEM;
+       if (ptr == NULL)
+               goto out_unlock;
+
+       ret = -ENOMEM;
+       if (*ptr != NULL)
+               goto out_unlock;
+
+       *ptr = pasid_state;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+
+       return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, int pasid)
+{
+       struct pasid_state **ptr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+       if (ptr == NULL)
+               goto out_unlock;
+
+       *ptr = NULL;
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+                                          int pasid)
+{
+       struct pasid_state **ptr, *ret = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_state->lock, flags);
+       ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+       if (ptr == NULL)
+               goto out_unlock;
+
+       ret = *ptr;
+       if (ret)
+               atomic_inc(&ret->count);
+
+out_unlock:
+       spin_unlock_irqrestore(&dev_state->lock, flags);
+
+       return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+       kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+       if (atomic_dec_and_test(&pasid_state->count)) {
+               put_device_state(pasid_state->device_state);
+               wake_up(&pasid_state->wq);
+       }
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+       DEFINE_WAIT(wait);
+
+       prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+
+       if (atomic_dec_and_test(&pasid_state->count))
+               put_device_state(pasid_state->device_state);
+       else
+               schedule();
+
+       finish_wait(&pasid_state->wq, &wait);
+       mmput(pasid_state->mm);
+       free_pasid_state(pasid_state);
+}
+
+static void __unbind_pasid(struct pasid_state *pasid_state)
+{
+       struct iommu_domain *domain;
+
+       domain = pasid_state->device_state->domain;
+
+       amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+       clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
+
+       /* Make sure no more pending faults are in the queue */
+       flush_workqueue(iommu_wq);
+
+       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+       put_pasid_state(pasid_state); /* Reference taken in bind() function */
+}
+
+static void unbind_pasid(struct device_state *dev_state, int pasid)
+{
+       struct pasid_state *pasid_state;
+
+       pasid_state = get_pasid_state(dev_state, pasid);
+       if (pasid_state == NULL)
+               return;
+
+       unlink_pasid_state(pasid_state);
+       __unbind_pasid(pasid_state);
+       put_pasid_state_wait(pasid_state); /* Reference taken in this function */
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (tbl[i] == NULL)
+                       continue;
+
+               free_page((unsigned long)tbl[i]);
+       }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+       struct pasid_state **ptr;
+       int i;
+
+       for (i = 0; i < 512; ++i) {
+               if (tbl[i] == NULL)
+                       continue;
+
+               ptr = (struct pasid_state **)tbl[i];
+               free_pasid_states_level1(ptr);
+       }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+       struct pasid_state *pasid_state;
+       int i;
+
+       for (i = 0; i < dev_state->max_pasids; ++i) {
+               pasid_state = get_pasid_state(dev_state, i);
+               if (pasid_state == NULL)
+                       continue;
+
+               put_pasid_state(pasid_state);
+               unbind_pasid(dev_state, i);
+       }
+
+       if (dev_state->pasid_levels == 2)
+               free_pasid_states_level2(dev_state->states);
+       else if (dev_state->pasid_levels == 1)
+               free_pasid_states_level1(dev_state->states);
+       else if (dev_state->pasid_levels != 0)
+               BUG();
+
+       free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+       return container_of(mn, struct pasid_state, mn);
+}
+
+static void __mn_flush_page(struct mmu_notifier *mn,
+                           unsigned long address)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
+       amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
+}
+
+static int mn_clear_flush_young(struct mmu_notifier *mn,
+                               struct mm_struct *mm,
+                               unsigned long address)
+{
+       __mn_flush_page(mn, address);
+
+       return 0;
+}
+
+static void mn_change_pte(struct mmu_notifier *mn,
+                         struct mm_struct *mm,
+                         unsigned long address,
+                         pte_t pte)
+{
+       __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_page(struct mmu_notifier *mn,
+                              struct mm_struct *mm,
+                              unsigned long address)
+{
+       __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_range_start(struct mmu_notifier *mn,
+                                     struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
+       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+                                 __pa(empty_page_table));
+}
+
+static void mn_invalidate_range_end(struct mmu_notifier *mn,
+                                   struct mm_struct *mm,
+                                   unsigned long start, unsigned long end)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+
+       pasid_state = mn_to_state(mn);
+       dev_state   = pasid_state->device_state;
+
+       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+                                 __pa(pasid_state->mm->pgd));
+}
+
+static struct mmu_notifier_ops iommu_mn = {
+       .clear_flush_young      = mn_clear_flush_young,
+       .change_pte             = mn_change_pte,
+       .invalidate_page        = mn_invalidate_page,
+       .invalidate_range_start = mn_invalidate_range_start,
+       .invalidate_range_end   = mn_invalidate_range_end,
+};
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+                              u16 tag, int status)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       pasid_state->pri[tag].status = status;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+                          struct pasid_state *pasid_state,
+                          u16 tag)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+           pasid_state->pri[tag].finish) {
+               amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+                                      pasid_state->pri[tag].status, tag);
+               pasid_state->pri[tag].finish = false;
+               pasid_state->pri[tag].status = PPR_SUCCESS;
+       }
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void do_fault(struct work_struct *work)
+{
+       struct fault *fault = container_of(work, struct fault, work);
+       int npages, write;
+       struct page *page;
+
+       write = !!(fault->flags & PPR_FAULT_WRITE);
+
+       npages = get_user_pages(fault->state->task, fault->state->mm,
+                               fault->address, 1, write, 0, &page, NULL);
+
+       if (npages == 1) {
+               put_page(page);
+       } else if (fault->dev_state->inv_ppr_cb) {
+               int status;
+
+               status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+                                                     fault->pasid,
+                                                     fault->address,
+                                                     fault->flags);
+               switch (status) {
+               case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+                       set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+                       break;
+               case AMD_IOMMU_INV_PRI_RSP_INVALID:
+                       set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+                       break;
+               case AMD_IOMMU_INV_PRI_RSP_FAIL:
+                       set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+                       break;
+               default:
+                       BUG();
+               }
+       } else {
+               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+       }
+
+       finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+       put_pasid_state(fault->state);
+
+       kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+       struct amd_iommu_fault *iommu_fault;
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       unsigned long flags;
+       struct fault *fault;
+       bool finish;
+       u16 tag;
+       int ret;
+
+       iommu_fault = data;
+       tag         = iommu_fault->tag & 0x1ff;
+       finish      = (iommu_fault->tag >> 9) & 1;
+
+       ret = NOTIFY_DONE;
+       dev_state = get_device_state(iommu_fault->device_id);
+       if (dev_state == NULL)
+               goto out;
+
+       pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+       if (pasid_state == NULL) {
+               /* We know the device but not the PASID -> send INVALID */
+               amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+                                      PPR_INVALID, tag);
+               goto out_drop_state;
+       }
+
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       atomic_inc(&pasid_state->pri[tag].inflight);
+       if (finish)
+               pasid_state->pri[tag].finish = true;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+       fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+       if (fault == NULL) {
+               /* We are OOM - send success and let the device re-fault */
+               finish_pri_tag(dev_state, pasid_state, tag);
+               goto out_drop_state;
+       }
+
+       fault->dev_state = dev_state;
+       fault->address   = iommu_fault->address;
+       fault->state     = pasid_state;
+       fault->tag       = tag;
+       fault->finish    = finish;
+       fault->flags     = iommu_fault->flags;
+       INIT_WORK(&fault->work, do_fault);
+
+       queue_work(iommu_wq, &fault->work);
+
+       ret = NOTIFY_OK;
+
+out_drop_state:
+       put_device_state(dev_state);
+
+out:
+       return ret;
+}
+
+static struct notifier_block ppr_nb = {
+       .notifier_call = ppr_notifier,
+};
+
+/*
+ * Profile-event callback, registered for PROFILE_TASK_EXIT in
+ * amd_iommu_v2_init().  When a task exits, unbind every PASID that is
+ * still associated with it so the device stops using the task's
+ * address space before the mm is torn down.
+ */
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
+{
+       struct pasid_state *pasid_state;
+       struct task_struct *task;
+
+       task = data;
+
+       /*
+        * Using this notifier is a hack - but there is no other choice
+        * at the moment. What I really want is a sleeping notifier that
+        * is called when an MM goes down. But such a notifier doesn't
+        * exist yet. The notifier needs to sleep because it has to make
+        * sure that the device does not use the PASID and the address
+        * space anymore before it is destroyed. This includes waiting
+        * for pending PRI requests to pass the workqueue. The
+        * MMU-Notifiers would be a good fit, but they use RCU and so
+        * they are not allowed to sleep. Lets see how we can solve this
+        * in a more intelligent way in the future.
+        */
+again:
+       spin_lock(&ps_lock);
+       list_for_each_entry(pasid_state, &pasid_state_list, list) {
+               struct device_state *dev_state;
+               int pasid;
+
+               if (pasid_state->task != task)
+                       continue;
+
+               /* Drop Lock and unbind */
+               spin_unlock(&ps_lock);
+
+               /*
+                * NOTE(review): pasid_state fields are read here without
+                * ps_lock held - presumably safe because the exiting task
+                * can no longer call unbind itself; confirm against the
+                * refcounting in unbind_pasid().
+                */
+               dev_state = pasid_state->device_state;
+               pasid     = pasid_state->pasid;
+
+               if (pasid_state->device_state->inv_ctx_cb)
+                       dev_state->inv_ctx_cb(dev_state->pdev, pasid);
+
+               unbind_pasid(dev_state, pasid);
+
+               /* Task may be in the list multiple times */
+               goto again;
+       }
+       spin_unlock(&ps_lock);
+
+       return NOTIFY_OK;
+}
+
+/*
+ * amd_iommu_bind_pasid - bind a task's address space to a (device, PASID)
+ * @pdev:  PCI device previously registered via amd_iommu_init_device()
+ * @pasid: PASID to bind; must be below the device's max_pasids
+ * @task:  task whose mm will be used for DMA translations on this PASID
+ *
+ * On success returns 0; the task's mm reference (get_task_mm()) is held,
+ * an mmu_notifier is registered on it, and the mm's pgd is installed as
+ * the GCR3 table for the PASID.  Returns a negative errno on failure.
+ */
+int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+                        struct task_struct *task)
+{
+       struct pasid_state *pasid_state;
+       struct device_state *dev_state;
+       u16 devid;
+       int ret;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid     = device_id(pdev);
+       dev_state = get_device_state(devid);
+
+       if (dev_state == NULL)
+               return -EINVAL;
+
+       ret = -EINVAL;
+       if (pasid < 0 || pasid >= dev_state->max_pasids)
+               goto out;
+
+       ret = -ENOMEM;
+       pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+       if (pasid_state == NULL)
+               goto out;
+
+       atomic_set(&pasid_state->count, 1);
+       init_waitqueue_head(&pasid_state->wq);
+       pasid_state->task         = task;
+       pasid_state->mm           = get_task_mm(task);
+       pasid_state->device_state = dev_state;
+       pasid_state->pasid        = pasid;
+       pasid_state->mn.ops       = &iommu_mn;
+
+       /*
+        * NOTE(review): ret is still -ENOMEM here, so a task without an mm
+        * reports -ENOMEM rather than -EINVAL - confirm this is intended.
+        */
+       if (pasid_state->mm == NULL)
+               goto out_free;
+
+       mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
+
+       ret = set_pasid_state(dev_state, pasid_state, pasid);
+       if (ret)
+               goto out_unregister;
+
+       ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+                                       __pa(pasid_state->mm->pgd));
+       if (ret)
+               goto out_clear_state;
+
+       /* Make the binding visible to the task-exit notifier */
+       link_pasid_state(pasid_state);
+
+       return 0;
+
+out_clear_state:
+       clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+out_free:
+       free_pasid_state(pasid_state);
+
+out:
+       put_device_state(dev_state);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+/*
+ * amd_iommu_unbind_pasid - undo a previous amd_iommu_bind_pasid()
+ * @pdev:  the bound PCI device
+ * @pasid: the PASID to unbind
+ *
+ * Silently does nothing if v2 is unsupported, the device is unknown,
+ * or the PASID is out of range.  May sleep.
+ */
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+{
+       struct device_state *dev_state;
+       u16 devid;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       devid = device_id(pdev);
+       dev_state = get_device_state(devid);
+       if (dev_state == NULL)
+               return;
+
+       if (pasid < 0 || pasid >= dev_state->max_pasids)
+               goto out;
+
+       unbind_pasid(dev_state, pasid);
+
+out:
+       /* Drop the reference taken by get_device_state() above */
+       put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+/*
+ * amd_iommu_init_device - enable IOMMUv2/PASID support for a PCI device
+ * @pdev:   the device to set up
+ * @pasids: number of PASIDs the caller wants; must be in (0, PASID_MASK+1]
+ *
+ * Allocates the per-device state, creates a direct-mapped v2 IOMMU
+ * domain, attaches the device to it and registers the state in the
+ * global state_table.  Returns 0 on success or a negative errno.
+ * May sleep.
+ */
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       int ret, tmp;
+       u16 devid;
+
+       might_sleep();
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       if (pasids <= 0 || pasids > (PASID_MASK + 1))
+               return -EINVAL;
+
+       devid = device_id(pdev);
+
+       dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+       if (dev_state == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&dev_state->lock);
+       init_waitqueue_head(&dev_state->wq);
+       dev_state->pdev = pdev;
+
+       /* Count how many extra 9-bit table levels are needed beyond the
+        * first 512-entry level to index 'pasids' states */
+       tmp = pasids;
+       for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+               dev_state->pasid_levels += 1;
+
+       atomic_set(&dev_state->count, 1);
+       dev_state->max_pasids = pasids;
+
+       ret = -ENOMEM;
+       dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+       if (dev_state->states == NULL)
+               goto out_free_dev_state;
+
+       dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+       if (dev_state->domain == NULL)
+               goto out_free_states;
+
+       amd_iommu_domain_direct_map(dev_state->domain);
+
+       ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+       if (ret)
+               goto out_free_domain;
+
+       ret = iommu_attach_device(dev_state->domain, &pdev->dev);
+       if (ret != 0)
+               goto out_free_domain;
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       /* Reject a second init for the same device id */
+       if (state_table[devid] != NULL) {
+               spin_unlock_irqrestore(&state_lock, flags);
+               ret = -EBUSY;
+               goto out_free_domain;
+       }
+
+       state_table[devid] = dev_state;
+
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return 0;
+
+out_free_domain:
+       iommu_domain_free(dev_state->domain);
+
+out_free_states:
+       free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+       kfree(dev_state);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
+
+/*
+ * amd_iommu_free_device - tear down the state set up by
+ * amd_iommu_init_device()
+ * @pdev: the device to release
+ *
+ * Unlinks the device from state_table, frees remaining PASID states
+ * and waits for the reference count to drop before freeing.
+ */
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+
+       if (!amd_iommu_v2_supported())
+               return;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       dev_state = state_table[devid];
+       if (dev_state == NULL) {
+               spin_unlock_irqrestore(&state_lock, flags);
+               return;
+       }
+
+       /* Unpublish first so no new references can be taken */
+       state_table[devid] = NULL;
+
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       /* Get rid of any remaining pasid states */
+       free_pasid_states(dev_state);
+
+       /* Drops the initial reference and blocks until all users are gone */
+       put_device_state_wait(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
+
+/*
+ * amd_iommu_set_invalid_ppr_cb - install a driver callback that decides
+ * how to answer PPRs which could not be resolved to a valid page fault
+ * @pdev: device the callback applies to (must be initialized)
+ * @cb:   callback, or NULL to remove it
+ *
+ * Returns 0 on success, -ENODEV without v2 support, -EINVAL for an
+ * unknown device.
+ */
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+                                amd_iommu_invalid_ppr_cb cb)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+       int ret;
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       ret = -EINVAL;
+       dev_state = state_table[devid];
+       if (dev_state == NULL)
+               goto out_unlock;
+
+       dev_state->inv_ppr_cb = cb;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+/*
+ * amd_iommu_set_invalidate_ctx_cb - install a driver callback invoked
+ * when a PASID context is torn down (see task_exit())
+ * @pdev: device the callback applies to (must be initialized)
+ * @cb:   callback, or NULL to remove it
+ *
+ * Returns 0 on success, -ENODEV without v2 support, -EINVAL for an
+ * unknown device.
+ */
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+                                   amd_iommu_invalidate_ctx cb)
+{
+       struct device_state *dev_state;
+       unsigned long flags;
+       u16 devid;
+       int ret;
+
+       if (!amd_iommu_v2_supported())
+               return -ENODEV;
+
+       devid = device_id(pdev);
+
+       spin_lock_irqsave(&state_lock, flags);
+
+       ret = -EINVAL;
+       dev_state = state_table[devid];
+       if (dev_state == NULL)
+               goto out_unlock;
+
+       dev_state->inv_ctx_cb = cb;
+
+       ret = 0;
+
+out_unlock:
+       spin_unlock_irqrestore(&state_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+/*
+ * Module init: allocate the global device-state table, the PPR
+ * workqueue and the empty page table, then hook up the PPR and
+ * task-exit notifiers.
+ */
+static int __init amd_iommu_v2_init(void)
+{
+       size_t state_table_size;
+       int ret;
+
+       /* NOTE(review): format string lacks a trailing '\n' */
+       pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>");
+
+       spin_lock_init(&state_lock);
+
+       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+       state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                              get_order(state_table_size));
+       if (state_table == NULL)
+               return -ENOMEM;
+
+       ret = -ENOMEM;
+       iommu_wq = create_workqueue("amd_iommu_v2");
+       if (iommu_wq == NULL)
+               goto out_free;
+
+       ret = -ENOMEM;
+       empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
+       if (empty_page_table == NULL)
+               goto out_destroy_wq;
+
+       amd_iommu_register_ppr_notifier(&ppr_nb);
+       profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
+
+       return 0;
+
+out_destroy_wq:
+       destroy_workqueue(iommu_wq);
+
+out_free:
+       free_pages((unsigned long)state_table, get_order(state_table_size));
+
+       return ret;
+}
+
+/*
+ * Module exit: unregister notifiers, drain the workqueue, clean up any
+ * device states that drivers failed to free (WARN_ON_ONCE flags that
+ * leak), then release the global resources.
+ */
+static void __exit amd_iommu_v2_exit(void)
+{
+       struct device_state *dev_state;
+       size_t state_table_size;
+       int i;
+
+       profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
+       amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+       flush_workqueue(iommu_wq);
+
+       /*
+        * The loop below might call flush_workqueue(), so call
+        * destroy_workqueue() after it
+        */
+       for (i = 0; i < MAX_DEVICES; ++i) {
+               dev_state = get_device_state(i);
+
+               if (dev_state == NULL)
+                       continue;
+
+               /* A device state still registered at exit is a driver bug */
+               WARN_ON_ONCE(1);
+
+               put_device_state(dev_state);
+               amd_iommu_free_device(dev_state->pdev);
+       }
+
+       destroy_workqueue(iommu_wq);
+
+       state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+       free_pages((unsigned long)state_table, get_order(state_table_size));
+
+       free_page((unsigned long)empty_page_table);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
index 31053a951c3452640d7dd4e2a6f4c9c2460dee63..c9c6053198d403626d08e2d0826fde7b633baef9 100644 (file)
 #define LEVEL_STRIDE           (9)
 #define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES    (~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
        return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
-                          int gfp_order, int iommu_prot)
+                          size_t size, int iommu_prot)
 {
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int prot = 0;
-       size_t size;
        int ret;
 
        if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;
 
-       size     = PAGE_SIZE << gfp_order;
        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
        return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-                            unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+                            unsigned long iova, size_t size)
 {
        struct dmar_domain *dmar_domain = domain->priv;
-       size_t size = PAGE_SIZE << gfp_order;
        int order;
 
        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
 
-       return order;
+       return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4060,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
        return 0;
 }
 
+/*
+ * Group numbers are arbitrary.  Device with the same group number
+ * indicate the iommu cannot differentiate between them.  To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level we're able to differentiate devices
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct pci_dev *bridge;
+       /*
+        * Packs segment/bus/devfn into one u32 group id.
+        * NOTE(review): the id is only compared for equality, so the
+        * union's byte layout presumably does not matter - confirm no
+        * caller interprets the individual bytes.
+        */
+       union {
+               struct {
+                       u8 devfn;
+                       u8 bus;
+                       u16 segment;
+               } pci;
+               u32 group;
+       } id;
+
+       if (iommu_no_mapping(dev))
+               return -ENODEV;
+
+       id.pci.segment = pci_domain_nr(pdev->bus);
+       id.pci.bus = pdev->bus->number;
+       id.pci.devfn = pdev->devfn;
+
+       if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+               return -ENODEV;
+
+       /*
+        * Behind a legacy PCI bridge the IOMMU only sees the bridge's
+        * requester id, so all devices below it share one group.
+        */
+       bridge = pci_find_upstream_pcie_bridge(pdev);
+       if (bridge) {
+               if (pci_is_pcie(bridge)) {
+                       id.pci.bus = bridge->subordinate->number;
+                       id.pci.devfn = 0;
+               } else {
+                       id.pci.bus = bridge->bus->number;
+                       id.pci.devfn = bridge->devfn;
+               }
+       }
+
+       /* Multifunction devices share a group unless this is a VF */
+       if (!pdev->is_virtfn && iommu_group_mf)
+               id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+       *groupid = id.group;
+
+       return 0;
+}
+
 static struct iommu_ops intel_iommu_ops = {
        .domain_init    = intel_iommu_domain_init,
        .domain_destroy = intel_iommu_domain_destroy,
@@ -4069,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
+       .device_group   = intel_iommu_device_group,
+       .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
index 5b5fa5cdaa3108da74b7358ae187dd4ee8a00181..2198b2dbbcd3ad964b03a13dd6fd8dd336f27bed 100644 (file)
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/errno.h>
 #include <linux/iommu.h>
 
+/*
+ * sysfs show handler for the per-device "iommu_group" attribute.
+ * Prints the group id, or nothing if the device has no group.
+ * NOTE(review): output has no trailing newline - confirm intended.
+ */
+static ssize_t show_iommu_group(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       unsigned int groupid;
+
+       if (iommu_device_group(dev, &groupid))
+               return 0;
+
+       return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+/*
+ * bus_for_each_dev()/notifier helper: create the iommu_group sysfs
+ * attribute for devices that belong to a group.  Devices without a
+ * group are skipped silently.
+ */
+static int add_iommu_group(struct device *dev, void *data)
+{
+       unsigned int groupid;
+
+       if (iommu_device_group(dev, &groupid) == 0)
+               return device_create_file(dev, &dev_attr_iommu_group);
+
+       return 0;
+}
+
+/* Counterpart of add_iommu_group(): remove the sysfs attribute again */
+static int remove_iommu_group(struct device *dev)
+{
+       unsigned int groupid;
+
+       if (iommu_device_group(dev, &groupid) == 0)
+               device_remove_file(dev, &dev_attr_iommu_group);
+
+       return 0;
+}
+
+/*
+ * Bus notifier: keep the per-device iommu_group attribute in sync as
+ * devices are added to or removed from the bus.
+ */
+static int iommu_device_notifier(struct notifier_block *nb,
+                                unsigned long action, void *data)
+{
+       struct device *dev = data;
+
+       if (action == BUS_NOTIFY_ADD_DEVICE)
+               return add_iommu_group(dev, NULL);
+       else if (action == BUS_NOTIFY_DEL_DEVICE)
+               return remove_iommu_group(dev);
+
+       return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+       .notifier_call = iommu_device_notifier,
+};
+
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
+       /* Track future device add/remove events on this bus */
+       bus_register_notifier(bus, &iommu_device_nb);
+       /* Devices already present won't trigger the notifier - scan them */
+       bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
 }
 
 /**
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, int gfp_order, int prot)
+             phys_addr_t paddr, size_t size, int prot)
 {
-       size_t size;
+       unsigned long orig_iova = iova;
+       unsigned int min_pagesz;
+       size_t orig_size = size;
+       int ret = 0;
 
        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;
 
-       size         = PAGE_SIZE << gfp_order;
+       /* find out the minimum page size supported */
+       min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+       /*
+        * both the virtual address and the physical one, as well as
+        * the size of the mapping, must be aligned (at least) to the
+        * size of the smallest page supported by the hardware
+        */
+       if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+               pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+                       "0x%x\n", iova, (unsigned long)paddr,
+                       (unsigned long)size, min_pagesz);
+               return -EINVAL;
+       }
+
+       pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+                               (unsigned long)paddr, (unsigned long)size);
+
+       while (size) {
+               unsigned long pgsize, addr_merge = iova | paddr;
+               unsigned int pgsize_idx;
+
+               /* Max page size that still fits into 'size' */
+               pgsize_idx = __fls(size);
+
+               /* need to consider alignment requirements ? */
+               if (likely(addr_merge)) {
+                       /* Max page size allowed by both iova and paddr */
+                       unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+                       pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+               }
+
+               /* build a mask of acceptable page sizes */
+               pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
-       BUG_ON(!IS_ALIGNED(iova | paddr, size));
+               /* throw away page sizes not supported by the hardware */
+               pgsize &= domain->ops->pgsize_bitmap;
 
-       return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+               /* make sure we're still sane */
+               BUG_ON(!pgsize);
+
+               /* pick the biggest page */
+               pgsize_idx = __fls(pgsize);
+               pgsize = 1UL << pgsize_idx;
+
+               pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+                                       (unsigned long)paddr, pgsize);
+
+               ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+               if (ret)
+                       break;
+
+               iova += pgsize;
+               paddr += pgsize;
+               size -= pgsize;
+       }
+
+       /* unroll mapping in case something went wrong */
+       if (ret)
+               iommu_unmap(domain, orig_iova, orig_size - size);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-       size_t size;
+       size_t unmapped_page, unmapped = 0;
+       unsigned int min_pagesz;
 
        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;
 
-       size         = PAGE_SIZE << gfp_order;
+       /* find out the minimum page size supported */
+       min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+       /*
+        * The virtual address, as well as the size of the mapping, must be
+        * aligned (at least) to the size of the smallest page supported
+        * by the hardware
+        */
+       if (!IS_ALIGNED(iova | size, min_pagesz)) {
+               pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+                                       iova, (unsigned long)size, min_pagesz);
+               return -EINVAL;
+       }
+
+       pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+                                                       (unsigned long)size);
+
+       /*
+        * Keep iterating until we either unmap 'size' bytes (or more)
+        * or we hit an area that isn't mapped.
+        */
+       while (unmapped < size) {
+               size_t left = size - unmapped;
+
+               unmapped_page = domain->ops->unmap(domain, iova, left);
+               if (!unmapped_page)
+                       break;
+
+               pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+                                       (unsigned long)unmapped_page);
+
+               iova += unmapped_page;
+               unmapped += unmapped_page;
+       }
+
+       return unmapped;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
 
-       BUG_ON(!IS_ALIGNED(iova, size));
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+       if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+               return dev->bus->iommu_ops->device_group(dev, groupid);
 
-       return domain->ops->unmap(domain, iova, gfp_order);
+       return -ENODEV;
 }
-EXPORT_SYMBOL_GPL(iommu_unmap);
+EXPORT_SYMBOL_GPL(iommu_device_group);
index 5865dd2e28f928b0cf55ff1c5ebddfe5786e1d16..08a90b88e40d80feb9d01185e534a1351cf7473d 100644 (file)
@@ -42,6 +42,9 @@ __asm__ __volatile__ (                                                        \
 #define RCP15_PRRR(reg)                MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)                MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES      (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-                        phys_addr_t pa, int order, int prot)
+                        phys_addr_t pa, size_t len, int prot)
 {
        struct msm_priv *priv;
        unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
-       size_t len = 0x1000UL << order;
        int ret = 0, tex, sh;
 
        spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
        return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-                           int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+                           size_t len)
 {
        struct msm_priv *priv;
        unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
-       size_t len = 0x1000UL << order;
        int i, ret = 0;
 
        spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
        ret = __flush_iotlb(domain);
 
-       /*
-        * the IOMMU API requires us to return the order of the unmapped
-        * page (on success).
-        */
-       if (!ret)
-               ret = order;
 fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
-       return ret;
+
+       /* the IOMMU API requires us to return how many bytes were unmapped */
+       len = ret ? 0 : len;
+       return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .iova_to_phys = msm_iommu_iova_to_phys,
-       .domain_has_cap = msm_iommu_domain_has_cap
+       .domain_has_cap = msm_iommu_domain_has_cap,
+       .pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
index 8f32b2bf758777686c4bf9c7a85df3edaf9ac3af..d8edd979d01b2c3d84ffb49a9d2c3c53829518d9 100644 (file)
@@ -33,6 +33,9 @@
             (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
             __i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES     (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:   the page table
@@ -86,20 +89,24 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
 
 /**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
- * @obj:       target iommu
+ * @dev:       client device
  **/
-void omap_iommu_save_ctx(struct omap_iommu *obj)
+void omap_iommu_save_ctx(struct device *dev)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
        arch_iommu->save_ctx(obj);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
 
 /**
  * omap_iommu_restore_ctx - Restore registers for pm off-mode support
- * @obj:       target iommu
+ * @dev:       client device
  **/
-void omap_iommu_restore_ctx(struct omap_iommu *obj)
+void omap_iommu_restore_ctx(struct device *dev)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
        arch_iommu->restore_ctx(obj);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -819,36 +826,24 @@ static int device_match_by_alias(struct device *dev, void *data)
        return strcmp(obj->name, name) == 0;
 }
 
-/**
- * omap_find_iommu_device() - find an omap iommu device by name
- * @name:      name of the iommu device
- *
- * The generic iommu API requires the caller to provide the device
- * he wishes to attach to a certain iommu domain.
- *
- * Drivers generally should not bother with this as it should just
- * be taken care of by the DMA-API using dev_archdata.
- *
- * This function is provided as an interim solution until the latter
- * materializes, and omap3isp is fully migrated to the DMA-API.
- */
-struct device *omap_find_iommu_device(const char *name)
-{
-       return driver_find_device(&omap_iommu_driver.driver, NULL,
-                               (void *)name,
-                               device_match_by_alias);
-}
-EXPORT_SYMBOL_GPL(omap_find_iommu_device);
-
 /**
  * omap_iommu_attach() - attach iommu device to an iommu domain
- * @dev:       target omap iommu device
+ * @name:      name of target omap iommu device
  * @iopgd:     page table
  **/
-static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 {
        int err = -ENOMEM;
-       struct omap_iommu *obj = to_iommu(dev);
+       struct device *dev;
+       struct omap_iommu *obj;
+
+       dev = driver_find_device(&omap_iommu_driver.driver, NULL,
+                               (void *)name,
+                               device_match_by_alias);
+       if (!dev)
+               return NULL;
+
+       obj = to_iommu(dev);
 
        spin_lock(&obj->iommu_lock);
 
@@ -1019,12 +1014,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-                        phys_addr_t pa, int order, int prot)
+                        phys_addr_t pa, size_t bytes, int prot)
 {
        struct omap_iommu_domain *omap_domain = domain->priv;
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
        struct device *dev = oiommu->dev;
-       size_t bytes = PAGE_SIZE << order;
        struct iotlb_entry e;
        int omap_pgsz;
        u32 ret, flags;
@@ -1049,19 +1043,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
        return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                           int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+                           size_t size)
 {
        struct omap_iommu_domain *omap_domain = domain->priv;
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
        struct device *dev = oiommu->dev;
-       size_t unmap_size;
-
-       dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
 
-       unmap_size = iopgtable_clear_entry(oiommu, da);
+       dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-       return unmap_size ? get_order(unmap_size) : -EINVAL;
+       return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1069,6 +1060,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
        struct omap_iommu_domain *omap_domain = domain->priv;
        struct omap_iommu *oiommu;
+       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
        int ret = 0;
 
        spin_lock(&omap_domain->lock);
@@ -1081,14 +1073,14 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
        }
 
        /* get a handle to and enable the omap iommu */
-       oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+       oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
        if (IS_ERR(oiommu)) {
                ret = PTR_ERR(oiommu);
                dev_err(dev, "can't get omap iommu: %d\n", ret);
                goto out;
        }
 
-       omap_domain->iommu_dev = oiommu;
+       omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
        oiommu->domain = domain;
 
 out:
@@ -1100,7 +1092,8 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
 {
        struct omap_iommu_domain *omap_domain = domain->priv;
-       struct omap_iommu *oiommu = to_iommu(dev);
+       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
 
        spin_lock(&omap_domain->lock);
 
@@ -1114,7 +1107,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
 
        omap_iommu_detach(oiommu);
 
-       omap_domain->iommu_dev = NULL;
+       omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
 
 out:
        spin_unlock(&omap_domain->lock);
@@ -1183,14 +1176,14 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                else if (iopte_is_large(*pte))
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
-                       dev_err(dev, "bogus pte 0x%x", *pte);
+                       dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
                else if (iopgd_is_super(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
-                       dev_err(dev, "bogus pgd 0x%x", *pgd);
+                       dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
        }
 
        return ret;
@@ -1211,6 +1204,7 @@ static struct iommu_ops omap_iommu_ops = {
        .unmap          = omap_iommu_unmap,
        .iova_to_phys   = omap_iommu_iova_to_phys,
        .domain_has_cap = omap_iommu_domain_has_cap,
+       .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)
index 46be456fcc00e01c10b2ae8106a83310b41d29b9..2e10c3e0a7aee3eaa3815f473e06d6999ce8866b 100644 (file)
@@ -231,12 +231,14 @@ static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
 
 /**
  * omap_find_iovm_area  -  find iovma which includes @da
+ * @dev:       client device
  * @da:                iommu device virtual address
  *
  * Find the existing iovma starting at @da
  */
-struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct iovm_struct *area;
 
        mutex_lock(&obj->mmap_lock);
@@ -343,14 +345,15 @@ static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
 
 /**
  * omap_da_to_va - convert (d) to (v)
- * @obj:       objective iommu
+ * @dev:       client device
  * @da:                iommu device virtual address
  * @va:                mpu virtual address
  *
  * Returns mpu virtual addr which corresponds to a given device virtual addr
  */
-void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+void *omap_da_to_va(struct device *dev, u32 da)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va = NULL;
        struct iovm_struct *area;
 
@@ -410,7 +413,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;
-       int order;
 
        if (!domain || !sgt)
                return -EINVAL;
@@ -429,12 +431,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;
 
-               order = get_order(bytes);
-
                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);
 
-               err = iommu_map(domain, da, pa, order, flags);
+               err = iommu_map(domain, da, pa, bytes, flags);
                if (err)
                        goto err_out;
 
@@ -449,10 +449,9 @@ err_out:
                size_t bytes;
 
                bytes = sg->length + sg->offset;
-               order = get_order(bytes);
 
                /* ignore failures.. we're already handling one */
-               iommu_unmap(domain, da, order);
+               iommu_unmap(domain, da, bytes);
 
                da += bytes;
        }
@@ -467,7 +466,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
-       int i, err;
+       int i;
+       size_t unmapped;
 
        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +475,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
-               int order;
 
                bytes = sg->length + sg->offset;
-               order = get_order(bytes);
 
-               err = iommu_unmap(domain, start, order);
-               if (err < 0)
+               unmapped = iommu_unmap(domain, start, bytes);
+               if (unmapped < bytes)
                        break;
 
                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
@@ -582,16 +580,18 @@ __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
 
 /**
  * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
- * @obj:       objective iommu
+ * @domain:    iommu domain
+ * @dev:       client device
  * @sgt:       address of scatter gather table
  * @flags:     iovma and page property
  *
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt element must be io page size aligned.
  */
-u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
                const struct sg_table *sgt, u32 flags)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        size_t bytes;
        void *va = NULL;
 
@@ -622,15 +622,17 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmap);
 
 /**
  * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
- * @obj:       objective iommu
+ * @domain:    iommu domain
+ * @dev:       client device
  * @da:                iommu device virtual address
  *
  * Free the iommu virtually contiguous memory area starting at
  * @da, which was returned by 'omap_iommu_vmap()'.
  */
 struct sg_table *
-omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
@@ -647,7 +649,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
 
 /**
  * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
- * @obj:       objective iommu
+ * @dev:       client device
  * @da:                contiguous iommu virtual memory
  * @bytes:     allocation size
  * @flags:     iovma and page property
@@ -656,9 +658,10 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
 u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
                                                size_t bytes, u32 flags)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va;
        struct sg_table *sgt;
 
@@ -698,15 +701,16 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
 
 /**
  * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
- * @obj:       objective iommu
+ * @dev:       client device
  * @da:                iommu device virtual address
  *
  * Frees the iommu virtually contiguous memory area starting at
  * @da, as obtained from 'omap_iommu_vmalloc()'.
  */
-void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
                                                                const u32 da)
 {
+       struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;
 
        sgt = unmap_vm_area(domain, obj, da, vfree,
index 1b75a56ebd08016cb517e62086ec62949e9aed0a..897a77dfa9d7dd923ec7567c26c729e1901e1757 100644 (file)
@@ -388,6 +388,14 @@ config LEDS_RENESAS_TPU
          pin function. The latter to support brightness control.
          Brightness control is supported but hardware blinking is not.
 
+config LEDS_TCA6507
+       tristate "LED Support for TCA6507 I2C chip"
+       depends on LEDS_CLASS && I2C
+       help
+         This option enables support for LEDs connected to TCA6507
+         LED driver chips accessed via the I2C bus.
+         Driver supports brightness control and hardware-assisted blinking.
+
 config LEDS_TRIGGERS
        bool "LED Trigger support"
        depends on LEDS_CLASS
index e4f6bf568880d284cc4743e4cb6231b450d57296..5c9dc4b000d5d2ab9996de7909623d030e2352fa 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_LEDS_GPIO)                       += leds-gpio.o
 obj-$(CONFIG_LEDS_LP3944)              += leds-lp3944.o
 obj-$(CONFIG_LEDS_LP5521)              += leds-lp5521.o
 obj-$(CONFIG_LEDS_LP5523)              += leds-lp5523.o
+obj-$(CONFIG_LEDS_TCA6507)             += leds-tca6507.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)          += leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)               += leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)                 += leds-fsg.o
index 0810604dc701307973f2967b8fd578f5a9bee997..4ca00624bd1860ed3b1f29e27ea79b59b47dc46d 100644 (file)
@@ -238,17 +238,7 @@ static struct platform_driver pm860x_led_driver = {
        .remove = pm860x_led_remove,
 };
 
-static int __devinit pm860x_led_init(void)
-{
-       return platform_driver_register(&pm860x_led_driver);
-}
-module_init(pm860x_led_init);
-
-static void __devexit pm860x_led_exit(void)
-{
-       platform_driver_unregister(&pm860x_led_driver);
-}
-module_exit(pm860x_led_exit);
+module_platform_driver(pm860x_led_driver);
 
 MODULE_DESCRIPTION("LED driver for Marvell PM860x");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 7ba4c7b5b97e07ce6ff11d26895a154b25b5a956..b1400db3f839a2730b6dc29db302306fb3f627eb 100644 (file)
@@ -213,17 +213,7 @@ static struct platform_driver adp5520_led_driver = {
        .remove         = __devexit_p(adp5520_led_remove),
 };
 
-static int __init adp5520_led_init(void)
-{
-       return platform_driver_register(&adp5520_led_driver);
-}
-module_init(adp5520_led_init);
-
-static void __exit adp5520_led_exit(void)
-{
-       platform_driver_unregister(&adp5520_led_driver);
-}
-module_exit(adp5520_led_exit);
+module_platform_driver(adp5520_led_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
index 8c00937bf7e74d02bc1759cbab01ca6eba1bc13e..07428357c83fd467ef2c524c65f4c4f4404f2fe1 100644 (file)
@@ -118,18 +118,7 @@ static struct platform_driver ams_delta_led_driver = {
        },
 };
 
-static int __init ams_delta_led_init(void)
-{
-       return platform_driver_register(&ams_delta_led_driver);
-}
-
-static void __exit ams_delta_led_exit(void)
-{
-       platform_driver_unregister(&ams_delta_led_driver);
-}
-
-module_init(ams_delta_led_init);
-module_exit(ams_delta_led_exit);
+module_platform_driver(ams_delta_led_driver);
 
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
 MODULE_DESCRIPTION("Amstrad Delta LED driver");
index 48d9fe61bdfcdc39e7bd6b1d942bb637f28c00ff..525a92492837bb892de5b4fb29050bf939a0a4fb 100644 (file)
@@ -179,21 +179,9 @@ static struct platform_driver asic3_led_driver = {
        },
 };
 
-MODULE_ALIAS("platform:leds-asic3");
-
-static int __init asic3_led_init(void)
-{
-       return platform_driver_register(&asic3_led_driver);
-}
-
-static void __exit asic3_led_exit(void)
-{
-       platform_driver_unregister(&asic3_led_driver);
-}
-
-module_init(asic3_led_init);
-module_exit(asic3_led_exit);
+module_platform_driver(asic3_led_driver);
 
 MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
 MODULE_DESCRIPTION("HTC ASIC3 LED driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-asic3");
index 109c875ea233486fc191a33b938f06f719ad1eaa..800243b6037ed9edc5b945b74d226a17be05f712 100644 (file)
@@ -134,29 +134,18 @@ static int __exit pwmled_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:leds-atmel-pwm");
-
 static struct platform_driver pwmled_driver = {
        .driver = {
                .name =         "leds-atmel-pwm",
                .owner =        THIS_MODULE,
        },
        /* REVISIT add suspend() and resume() methods */
+       .probe =        pwmled_probe,
        .remove =       __exit_p(pwmled_remove),
 };
 
-static int __init modinit(void)
-{
-       return platform_driver_probe(&pwmled_driver, pwmled_probe);
-}
-module_init(modinit);
-
-static void __exit modexit(void)
-{
-       platform_driver_unregister(&pwmled_driver);
-}
-module_exit(modexit);
+module_platform_driver(pwmled_driver);
 
 MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-atmel-pwm");
index ea2185531f826e064d53437f956fbfb04e5e35f5..591cbdf5a0463e99ab91892bfc50b0beeb1c579c 100644 (file)
@@ -688,8 +688,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
        i2c_set_clientdata(client, led);
 
        /* Configure RESET GPIO (L: RESET, H: RESET cancel) */
-       gpio_request(pdata->reset_gpio, "RGB_RESETB");
-       gpio_direction_output(pdata->reset_gpio, 1);
+       gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
 
        /* Tacss = min 0.1ms */
        udelay(100);
@@ -813,17 +812,7 @@ static struct i2c_driver bd2802_i2c_driver = {
        .id_table       = bd2802_id,
 };
 
-static int __init bd2802_init(void)
-{
-       return i2c_add_driver(&bd2802_i2c_driver);
-}
-module_init(bd2802_init);
-
-static void __exit bd2802_exit(void)
-{
-       i2c_del_driver(&bd2802_i2c_driver);
-}
-module_exit(bd2802_exit);
+module_i2c_driver(bd2802_i2c_driver);
 
 MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
 MODULE_DESCRIPTION("BD2802 LED driver");
index da5fb016b1a550fabfee5114bb11727a22c01749..6a8725cc7b4dfe119a2083e37e1290198bdbdd24 100644 (file)
@@ -75,9 +75,6 @@ static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:cobalt-qube-leds");
-
 static struct platform_driver cobalt_qube_led_driver = {
        .probe  = cobalt_qube_led_probe,
        .remove = __devexit_p(cobalt_qube_led_remove),
@@ -87,19 +84,9 @@ static struct platform_driver cobalt_qube_led_driver = {
        },
 };
 
-static int __init cobalt_qube_led_init(void)
-{
-       return platform_driver_register(&cobalt_qube_led_driver);
-}
-
-static void __exit cobalt_qube_led_exit(void)
-{
-       platform_driver_unregister(&cobalt_qube_led_driver);
-}
-
-module_init(cobalt_qube_led_init);
-module_exit(cobalt_qube_led_exit);
+module_platform_driver(cobalt_qube_led_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Front LED support for Cobalt Server");
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_ALIAS("platform:cobalt-qube-leds");
index f28931cf6781049562018d0af5d59e014b2936d2..d9cd73ebd6c44c3e89ff032e2c2155eb036ad972 100644 (file)
@@ -158,17 +158,7 @@ static struct platform_driver da903x_led_driver = {
        .remove         = __devexit_p(da903x_led_remove),
 };
 
-static int __init da903x_led_init(void)
-{
-       return platform_driver_register(&da903x_led_driver);
-}
-module_init(da903x_led_init);
-
-static void __exit da903x_led_exit(void)
-{
-       platform_driver_unregister(&da903x_led_driver);
-}
-module_exit(da903x_led_exit);
+module_platform_driver(da903x_led_driver);
 
 MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
 MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
index 31cf0d60a9a546052e782d5177b8461c9673d941..d56c14269ff0c7e9c5e40992f689919024cef7bd 100644 (file)
@@ -131,18 +131,7 @@ static struct spi_driver dac124s085_driver = {
        },
 };
 
-static int __init dac124s085_leds_init(void)
-{
-       return spi_register_driver(&dac124s085_driver);
-}
-
-static void __exit dac124s085_leds_exit(void)
-{
-       spi_unregister_driver(&dac124s085_driver);
-}
-
-module_init(dac124s085_leds_init);
-module_exit(dac124s085_leds_exit);
+module_spi_driver(dac124s085_driver);
 
 MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
 MODULE_DESCRIPTION("DAC124S085 LED driver");
index 49aceffaa5b6b76d0fb4b5fc8e14d6b90012f7fa..b9053fa6e2534312495217164c4a5f4e1eeb403d 100644 (file)
@@ -224,20 +224,7 @@ static struct platform_driver fsg_led_driver = {
        },
 };
 
-
-static int __init fsg_led_init(void)
-{
-       return platform_driver_register(&fsg_led_driver);
-}
-
-static void __exit fsg_led_exit(void)
-{
-       platform_driver_unregister(&fsg_led_driver);
-}
-
-
-module_init(fsg_led_init);
-module_exit(fsg_led_exit);
+module_platform_driver(fsg_led_driver);
 
 MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
 MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
index 399a86f2013a145f1945f39077d23805bb40213d..7df74cb97e702e693935ab798598b9967b0d0ae7 100644 (file)
@@ -293,21 +293,9 @@ static struct platform_driver gpio_led_driver = {
        },
 };
 
-MODULE_ALIAS("platform:leds-gpio");
-
-static int __init gpio_led_init(void)
-{
-       return platform_driver_register(&gpio_led_driver);
-}
-
-static void __exit gpio_led_exit(void)
-{
-       platform_driver_unregister(&gpio_led_driver);
-}
-
-module_init(gpio_led_init);
-module_exit(gpio_led_exit);
+module_platform_driver(gpio_led_driver);
 
 MODULE_AUTHOR("Raphael Assenat <raph@8d.com>, Trent Piepho <tpiepho@freescale.com>");
 MODULE_DESCRIPTION("GPIO LED driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-gpio");
index bcfbd3a60eab6b8ee4ae0ecb4cf14efce17a2ac3..366b6055e33063e5461d5fdc2bdac3ba23fff611 100644 (file)
@@ -79,9 +79,6 @@ static int hp6xxled_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:hp6xx-led");
-
 static struct platform_driver hp6xxled_driver = {
        .probe          = hp6xxled_probe,
        .remove         = hp6xxled_remove,
@@ -91,19 +88,9 @@ static struct platform_driver hp6xxled_driver = {
        },
 };
 
-static int __init hp6xxled_init(void)
-{
-       return platform_driver_register(&hp6xxled_driver);
-}
-
-static void __exit hp6xxled_exit(void)
-{
-       platform_driver_unregister(&hp6xxled_driver);
-}
-
-module_init(hp6xxled_init);
-module_exit(hp6xxled_exit);
+module_platform_driver(hp6xxled_driver);
 
 MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
 MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hp6xx-led");
index 0630e4f4b2866a8829a1731411dfa19afb7e0bae..45e6878d73741d8359db2069b68bbe1df2589f2e 100644 (file)
@@ -457,18 +457,7 @@ static struct i2c_driver lm3530_i2c_driver = {
        },
 };
 
-static int __init lm3530_init(void)
-{
-       return i2c_add_driver(&lm3530_i2c_driver);
-}
-
-static void __exit lm3530_exit(void)
-{
-       i2c_del_driver(&lm3530_i2c_driver);
-}
-
-module_init(lm3530_init);
-module_exit(lm3530_exit);
+module_i2c_driver(lm3530_i2c_driver);
 
 MODULE_DESCRIPTION("Back Light driver for LM3530");
 MODULE_LICENSE("GPL v2");
index 9010c054615e414fa5c236a8fe2cc4a700a00757..b8f9f0a5d4318d1291e377fe1fd4599d3bc15e13 100644 (file)
@@ -453,18 +453,7 @@ static struct i2c_driver lp3944_driver = {
        .id_table = lp3944_id,
 };
 
-static int __init lp3944_module_init(void)
-{
-       return i2c_add_driver(&lp3944_driver);
-}
-
-static void __exit lp3944_module_exit(void)
-{
-       i2c_del_driver(&lp3944_driver);
-}
-
-module_init(lp3944_module_init);
-module_exit(lp3944_module_exit);
+module_i2c_driver(lp3944_driver);
 
 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
 MODULE_DESCRIPTION("LP3944 Fun Light Chip");
index cb641f1b33429ab2776cb300194b4e0fd00d3fdb..d62a7982a5e66ad25812e87c86773985c3ffc802 100644 (file)
@@ -797,25 +797,7 @@ static struct i2c_driver lp5521_driver = {
        .id_table       = lp5521_id,
 };
 
-static int __init lp5521_init(void)
-{
-       int ret;
-
-       ret = i2c_add_driver(&lp5521_driver);
-
-       if (ret < 0)
-               printk(KERN_ALERT "Adding lp5521 driver failed\n");
-
-       return ret;
-}
-
-static void __exit lp5521_exit(void)
-{
-       i2c_del_driver(&lp5521_driver);
-}
-
-module_init(lp5521_init);
-module_exit(lp5521_exit);
+module_i2c_driver(lp5521_driver);
 
 MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
 MODULE_DESCRIPTION("LP5521 LED engine");
index 5971e309b2342390a1988cd7f58d6e49d5a24301..73e791ae725993e1833f40cc8b0b059a0a55b53e 100644 (file)
@@ -870,8 +870,6 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
        return 0;
 }
 
-static struct i2c_driver lp5523_driver;
-
 static int __devinit lp5523_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
@@ -1021,25 +1019,7 @@ static struct i2c_driver lp5523_driver = {
        .id_table       = lp5523_id,
 };
 
-static int __init lp5523_init(void)
-{
-       int ret;
-
-       ret = i2c_add_driver(&lp5523_driver);
-
-       if (ret < 0)
-               printk(KERN_ALERT "Adding lp5523 driver failed\n");
-
-       return ret;
-}
-
-static void __exit lp5523_exit(void)
-{
-       i2c_del_driver(&lp5523_driver);
-}
-
-module_init(lp5523_init);
-module_exit(lp5523_exit);
+module_i2c_driver(lp5523_driver);
 
 MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
 MODULE_DESCRIPTION("LP5523 LED engine");
index 53f67b8ce55db505bd4ae5127423202e9141bca3..e311a96c4469759ae72eadabfc3f86c8c7795760 100644 (file)
@@ -199,21 +199,9 @@ static struct platform_driver lt3593_led_driver = {
        },
 };
 
-MODULE_ALIAS("platform:leds-lt3593");
-
-static int __init lt3593_led_init(void)
-{
-       return platform_driver_register(&lt3593_led_driver);
-}
-
-static void __exit lt3593_led_exit(void)
-{
-       platform_driver_unregister(&lt3593_led_driver);
-}
-
-module_init(lt3593_led_init);
-module_exit(lt3593_led_exit);
+module_platform_driver(lt3593_led_driver);
 
 MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
 MODULE_DESCRIPTION("LED driver for LT3593 controllers");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-lt3593");
index b3393a9f21398c119ad0440b68cf58953bc002a5..8bc4915415509d6e2456a0d4ea46446bf9188eca 100644 (file)
@@ -275,7 +275,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+       if (pdata->num_leds < 1 || pdata->num_leds > (MC13783_LED_MAX + 1)) {
                dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
                return -EINVAL;
        }
@@ -385,17 +385,7 @@ static struct platform_driver mc13783_led_driver = {
        .remove         = __devexit_p(mc13783_led_remove),
 };
 
-static int __init mc13783_led_init(void)
-{
-       return platform_driver_register(&mc13783_led_driver);
-}
-module_init(mc13783_led_init);
-
-static void __exit mc13783_led_exit(void)
-{
-       platform_driver_unregister(&mc13783_led_driver);
-}
-module_exit(mc13783_led_exit);
+module_platform_driver(mc13783_led_driver);
 
 MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
 MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
index f2e51c13439962391626b682337e912ca2a83bfb..d8433f2d53bc712995b899db7a19e6d16bb091be 100644 (file)
@@ -81,35 +81,23 @@ static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
 
        /* Configure address GPIOs. */
        for (i = 0; i < gpio_ext->num_addr; i++) {
-               err = gpio_request(gpio_ext->addr[i], "GPIO extension addr");
+               err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
+                                      "GPIO extension addr");
                if (err)
                        goto err_free_addr;
-               err = gpio_direction_output(gpio_ext->addr[i], 0);
-               if (err) {
-                       gpio_free(gpio_ext->addr[i]);
-                       goto err_free_addr;
-               }
        }
        /* Configure data GPIOs. */
        for (i = 0; i < gpio_ext->num_data; i++) {
-               err = gpio_request(gpio_ext->data[i], "GPIO extension data");
+               err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
+                                  "GPIO extension data");
                if (err)
                        goto err_free_data;
-               err = gpio_direction_output(gpio_ext->data[i], 0);
-               if (err) {
-                       gpio_free(gpio_ext->data[i]);
-                       goto err_free_data;
-               }
        }
        /* Configure "enable select" GPIO. */
-       err = gpio_request(gpio_ext->enable, "GPIO extension enable");
+       err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
+                              "GPIO extension enable");
        if (err)
                goto err_free_data;
-       err = gpio_direction_output(gpio_ext->enable, 0);
-       if (err) {
-               gpio_free(gpio_ext->enable);
-               goto err_free_data;
-       }
 
        return 0;
 
@@ -429,21 +417,10 @@ static struct platform_driver netxbig_led_driver = {
                .owner  = THIS_MODULE,
        },
 };
-MODULE_ALIAS("platform:leds-netxbig");
 
-static int __init netxbig_led_init(void)
-{
-       return platform_driver_register(&netxbig_led_driver);
-}
-
-static void __exit netxbig_led_exit(void)
-{
-       platform_driver_unregister(&netxbig_led_driver);
-}
-
-module_init(netxbig_led_init);
-module_exit(netxbig_led_exit);
+module_platform_driver(netxbig_led_driver);
 
 MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
 MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-netxbig");
index 37b7d0cfe5867e91ed0c1aa94039dc4985dc79f7..2f0a14421a7344fa0505f27bea94b23857fba340 100644 (file)
@@ -323,21 +323,10 @@ static struct platform_driver ns2_led_driver = {
                .owner  = THIS_MODULE,
        },
 };
-MODULE_ALIAS("platform:leds-ns2");
-
-static int __init ns2_led_init(void)
-{
-       return platform_driver_register(&ns2_led_driver);
-}
 
-static void __exit ns2_led_exit(void)
-{
-       platform_driver_unregister(&ns2_led_driver);
-}
-
-module_init(ns2_led_init);
-module_exit(ns2_led_exit);
+module_platform_driver(ns2_led_driver);
 
 MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
 MODULE_DESCRIPTION("Network Space v2 LED driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ns2");
index a2c874623e3521550ab6917be4b56eb3e42b4aaa..ceccab44b5b818404da63b76f74cda552e7eb367 100644 (file)
@@ -489,20 +489,8 @@ static int pca9532_remove(struct i2c_client *client)
        return 0;
 }
 
-static int __init pca9532_init(void)
-{
-       return i2c_add_driver(&pca9532_driver);
-}
-
-static void __exit pca9532_exit(void)
-{
-       i2c_del_driver(&pca9532_driver);
-}
+module_i2c_driver(pca9532_driver);
 
 MODULE_AUTHOR("Riku Voipio");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PCA 9532 LED dimmer");
-
-module_init(pca9532_init);
-module_exit(pca9532_exit);
-
index 66aa3e8e786f545c4db0e6bd4318a173a792fc1e..dcc3bc3d38db8cac7384c65009965967bc026f24 100644 (file)
@@ -371,18 +371,7 @@ static struct i2c_driver pca955x_driver = {
        .id_table = pca955x_id,
 };
 
-static int __init pca955x_leds_init(void)
-{
-       return i2c_add_driver(&pca955x_driver);
-}
-
-static void __exit pca955x_leds_exit(void)
-{
-       i2c_del_driver(&pca955x_driver);
-}
-
-module_init(pca955x_leds_init);
-module_exit(pca955x_leds_exit);
+module_i2c_driver(pca955x_driver);
 
 MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
 MODULE_DESCRIPTION("PCA955x LED driver");
index 666daf77872e56239bbf59a002c180593996ae7a..3ed92f34bd4477108549499e7bbf4959af1a56cd 100644 (file)
@@ -135,18 +135,7 @@ static struct platform_driver led_pwm_driver = {
        },
 };
 
-static int __init led_pwm_init(void)
-{
-       return platform_driver_register(&led_pwm_driver);
-}
-
-static void __exit led_pwm_exit(void)
-{
-       platform_driver_unregister(&led_pwm_driver);
-}
-
-module_init(led_pwm_init);
-module_exit(led_pwm_exit);
+module_platform_driver(led_pwm_driver);
 
 MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
 MODULE_DESCRIPTION("PWM LED driver for PXA");
index c3525f37f73d6895c5abfd619af06c7164cf119f..a7815b6cd8567d9dfd97ed9437f348d9cac4ebc8 100644 (file)
@@ -57,21 +57,9 @@ static struct platform_driver rb532_led_driver = {
        },
 };
 
-static int __init rb532_led_init(void)
-{
-       return platform_driver_register(&rb532_led_driver);
-}
-
-static void __exit rb532_led_exit(void)
-{
-       platform_driver_unregister(&rb532_led_driver);
-}
-
-module_init(rb532_led_init);
-module_exit(rb532_led_exit);
-
-MODULE_ALIAS("platform:rb532-led");
+module_platform_driver(rb532_led_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("User LED support for Routerboard532");
 MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
+MODULE_ALIAS("platform:rb532-led");
index 8497f56f8e461cf70d8d1b8692e04b11e353323c..df7e963bddd304f37bf507fe61ca317066111253 100644 (file)
@@ -229,17 +229,7 @@ static struct platform_driver regulator_led_driver = {
        .remove = __devexit_p(regulator_led_remove),
 };
 
-static int __init regulator_led_init(void)
-{
-       return platform_driver_register(&regulator_led_driver);
-}
-module_init(regulator_led_init);
-
-static void __exit regulator_led_exit(void)
-{
-       platform_driver_unregister(&regulator_led_driver);
-}
-module_exit(regulator_led_exit);
+module_platform_driver(regulator_led_driver);
 
 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
 MODULE_DESCRIPTION("Regulator driven LED driver");
index 3ee540eb127e7ffadfd0d5c2e471dc14380fa307..32fe337d5c687fbb09c06bd778e70cdc3cc7a223 100644 (file)
@@ -339,18 +339,7 @@ static struct platform_driver r_tpu_device_driver = {
        }
 };
 
-static int __init r_tpu_init(void)
-{
-       return platform_driver_register(&r_tpu_device_driver);
-}
-
-static void __exit r_tpu_exit(void)
-{
-       platform_driver_unregister(&r_tpu_device_driver);
-}
-
-module_init(r_tpu_init);
-module_exit(r_tpu_exit);
+module_platform_driver(r_tpu_device_driver);
 
 MODULE_AUTHOR("Magnus Damm");
 MODULE_DESCRIPTION("Renesas TPU LED Driver");
index 29f8b0f0e2c6f880429aea67bcddee59f52f6b91..bd0a5ed49c42d9473bf1fa2e735500ee8c2f67ad 100644 (file)
@@ -121,18 +121,7 @@ static struct platform_driver s3c24xx_led_driver = {
        },
 };
 
-static int __init s3c24xx_led_init(void)
-{
-       return platform_driver_register(&s3c24xx_led_driver);
-}
-
-static void __exit s3c24xx_led_exit(void)
-{
-       platform_driver_unregister(&s3c24xx_led_driver);
-}
-
-module_init(s3c24xx_led_init);
-module_exit(s3c24xx_led_exit);
+module_platform_driver(s3c24xx_led_driver);
 
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("S3C24XX LED driver");
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
new file mode 100644 (file)
index 0000000..133f89f
--- /dev/null
@@ -0,0 +1,779 @@
+/*
+ * leds-tca6507
+ *
+ * The TCA6507 is a programmable LED controller that can drive 7
+ * separate lines either by holding them low, or by pulsing them
+ * with modulated width.
+ * The modulation can be varied in a simple pattern to produce a blink or
+ * double-blink.
+ *
+ * This driver can configure each line either as a 'GPIO' which is out-only
+ * (no pull-up) or as an LED with variable brightness and hardware-assisted
+ * blinking.
+ *
+ * Apart from OFF and ON there are three programmable brightness levels which
+ * can be programmed from 0 to 15 and indicate how many 500usec intervals in
+ * each 8msec that the led is 'on'.  The levels are named MASTER, BANK0 and
+ * BANK1.
+ *
+ * There are two different blink rates that can be programmed, each with
+ * separate time for rise, on, fall, off and second-off.  Thus if 3 or more
+ * different non-trivial rates are required, software must be used for the extra
+ * rates. The two different blink rates must align with the two levels BANK0 and
+ * BANK1.
+ * This driver does not support double-blink so 'second-off' always matches
+ * 'off'.
+ *
+ * Only 16 different times can be programmed in a roughly logarithmic scale from
+ * 64ms to 16320ms.  To be precise the possible times are:
+ *    0, 64, 128, 192, 256, 384, 512, 768,
+ *    1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+ *
+ * Times that cannot be closely matched with these must be
+ * handled in software.  This driver allows 12.5% error in matching.
+ *
+ * This driver does not allow rise/fall rates to be set explicitly.  When trying
+ * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
+ * 'hold' times are chosen to get a close match.  If the target delay is even,
+ * the 'change' number will be the smaller; if odd, the 'hold' number will be
+ * the smaller.
+ *
+ * Choosing pairs of delays with 12.5% errors allows us to match delays in the
+ * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings. For example
+ * 1536 == 1536+0, 1024+512, or 768+768.  This driver will always choose the
+ * pairing with the least maximum - 768+768 in this case.  Other pairings are
+ * not available.
+ *
+ * Access to the 3 levels and 2 blinks are on a first-come, first-served basis.
+ * Access can be shared by multiple leds if they have the same level and
+ * either same blink rates, or some don't blink.
+ * When a led changes, it relinquishes access and tries again, so it might
+ * lose access to hardware blink.
+ * If a blink engine cannot be allocated, software blink is used.
+ * If the desired brightness cannot be allocated, the closest available non-zero
+ * brightness is used.  As 'full' is always available, the worst case would be
+ * to have two different blink rates at '1', with Max at '2', then other leds
+ * will have to choose between '2' and '16'.  Hopefully this is not likely.
+ *
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
+ * and LEDs using the blink.  It can only be reprogrammed when the appropriate
+ * counter is zero.  The MASTER level has a single usage count.
+ *
+ * Each Led has programmable 'on' and 'off' time as milliseconds.  With each
+ * there is a flag saying if it was explicitly requested or defaulted.
+ * Similarly the banks know if each time was explicit or a default.  Defaults
+ * are permitted to be changed freely - they are not recognised when matching.
+ *
+ *
+ * An led-tca6507 device must be provided with platform data.  This data
+ * lists for each output: the name, default trigger, and whether the signal
+ * is being used as a GPIO rather than an led.  'struct led_platform_data'
+ * is used for this.  If 'name' is NULL, the output isn't used.  If 'flags'
+ * is TCA6507_MAKE_GPIO, the output is a GPO.
+ * The "struct led_platform_data" can be embedded in a
+ * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPIOs,
+ * and a 'setup' callback which is called once the GPIOs are available.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/leds-tca6507.h>
+
+/* LED select registers determine the source that drives LED outputs */
+#define TCA6507_LS_LED_OFF     0x0     /* Output HI-Z (off) */
+#define TCA6507_LS_LED_OFF1    0x1     /* Output HI-Z (off) - not used */
+#define TCA6507_LS_LED_PWM0    0x2     /* Output LOW with Bank0 rate */
+#define TCA6507_LS_LED_PWM1    0x3     /* Output LOW with Bank1 rate */
+#define TCA6507_LS_LED_ON      0x4     /* Output LOW (on) */
+#define TCA6507_LS_LED_MIR     0x5     /* Output LOW with Master Intensity */
+#define TCA6507_LS_BLINK0      0x6     /* Blink at Bank0 rate */
+#define TCA6507_LS_BLINK1      0x7     /* Blink at Bank1 rate */
+
+enum {
+       BANK0,
+       BANK1,
+       MASTER,
+};
+static int bank_source[3] = {
+       TCA6507_LS_LED_PWM0,
+       TCA6507_LS_LED_PWM1,
+       TCA6507_LS_LED_MIR,
+};
+static int blink_source[2] = {
+       TCA6507_LS_BLINK0,
+       TCA6507_LS_BLINK1,
+};
+
+/* PWM registers */
+#define        TCA6507_REG_CNT                 11
+
+/*
+ * 0x00, 0x01, 0x02 encode the TCA6507_LS_* values, each output
+ * owns one bit in each register
+ */
+#define        TCA6507_FADE_ON                 0x03
+#define        TCA6507_FULL_ON                 0x04
+#define        TCA6507_FADE_OFF                0x05
+#define        TCA6507_FIRST_OFF               0x06
+#define        TCA6507_SECOND_OFF              0x07
+#define        TCA6507_MAX_INTENSITY           0x08
+#define        TCA6507_MASTER_INTENSITY        0x09
+#define        TCA6507_INITIALIZE              0x0A
+
+#define        INIT_CODE                       0x8
+
+#define TIMECODES 16
+static int time_codes[TIMECODES] = {
+       0, 64, 128, 192, 256, 384, 512, 768,
+       1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+};
+
+/* Convert an led.brightness level (0..255) to a TCA6507 level (0..15) */
+static inline int TO_LEVEL(int brightness)
+{
+       return brightness >> 4;
+}
+
+/* ...and convert back */
+static inline int TO_BRIGHT(int level)
+{
+       if (level)
+               return (level << 4) | 0xf;
+       return 0;
+}
+
+#define NUM_LEDS 7
+struct tca6507_chip {
+       int                     reg_set;        /* One bit per register where
+                                                * a '1' means the register
+                                                * should be written */
+       u8                      reg_file[TCA6507_REG_CNT];
+       /* Bank 2 is Master Intensity and doesn't use times */
+       struct bank {
+               int level;
+               int ontime, offtime;
+               int on_dflt, off_dflt;
+               int time_use, level_use;
+       } bank[3];
+       struct i2c_client       *client;
+       struct work_struct      work;
+       spinlock_t              lock;
+
+       struct tca6507_led {
+               struct tca6507_chip     *chip;
+               struct led_classdev     led_cdev;
+               int                     num;
+               int                     ontime, offtime;
+               int                     on_dflt, off_dflt;
+               int                     bank;   /* Bank used, or -1 */
+               int                     blink;  /* Set if hardware-blinking */
+       } leds[NUM_LEDS];
+#ifdef CONFIG_GPIOLIB
+       struct gpio_chip                gpio;
+       const char                      *gpio_name[NUM_LEDS];
+       int                             gpio_map[NUM_LEDS];
+#endif
+};
+
+static const struct i2c_device_id tca6507_id[] = {
+       { "tca6507" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6507_id);
+
+static int choose_times(int msec, int *c1p, int *c2p)
+{
+       /*
+        * Choose two timecodes which add to 'msec' as near as possible.
+        * The first returned is the 'on' or 'off' time.  The second is to be
+        * used as a 'fade-on' or 'fade-off' time.  If 'msec' is even,
+        * the first will not be smaller than the second.  If 'msec' is odd,
+        * the first will not be larger than the second.
+        * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
+        * otherwise return the sum that was achieved, plus 1 if the first is
+        * smaller.
+        * If two possibilities are equally good (e.g. 512+0, 256+256), choose
+        * the first pair so there is more change-time visible (i.e. it is
+        * softer).
+        */
+       int c1, c2;
+       int tmax = msec * 9 / 8;
+       int tmin = msec * 7 / 8;
+       int diff = 65536;
+
+       /* We start at '1' to ensure we never even think of choosing a
+        * total time of '0'.
+        */
+       for (c1 = 1; c1 < TIMECODES; c1++) {
+               int t = time_codes[c1];
+               if (t*2 < tmin)
+                       continue;
+               if (t > tmax)
+                       break;
+               for (c2 = 0; c2 <= c1; c2++) {
+                       int tt = t + time_codes[c2];
+                       int d;
+                       if (tt < tmin)
+                               continue;
+                       if (tt > tmax)
+                               break;
+                       /* This works! */
+                       d = abs(msec - tt);
+                       if (d >= diff)
+                               continue;
+                       /* Best yet */
+                       *c1p = c1;
+                       *c2p = c2;
+                       diff = d;
+                       if (d == 0)
+                               return msec;
+               }
+       }
+       if (diff < 65536) {
+               int actual;
+               if (msec & 1) {
+                       c1 = *c2p;
+                       *c2p = *c1p;
+                       *c1p = c1;
+               }
+               actual = time_codes[*c1p] + time_codes[*c2p];
+               if (*c1p < *c2p)
+                       return actual + 1;
+               else
+                       return actual;
+       }
+       /* No close match */
+       return -EINVAL;
+}
+
+/*
+ * Update the register file with the appropriate 3-bit state for
+ * the given led.
+ */
+static void set_select(struct tca6507_chip *tca, int led, int val)
+{
+       int mask = (1 << led);
+       int bit;
+
+       for (bit = 0; bit < 3; bit++) {
+               int n = tca->reg_file[bit] & ~mask;
+               if (val & (1 << bit))
+                       n |= mask;
+               if (tca->reg_file[bit] != n) {
+                       tca->reg_file[bit] = n;
+                       tca->reg_set |= (1 << bit);
+               }
+       }
+}
+
+/* Update the register file with the appropriate 4-bit code for
+ * one bank or other.  This can be used for timers, for levels, or
+ * for initialisation.
+ */
+static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
+{
+       int mask = 0xF;
+       int n;
+       if (bank) {
+               mask <<= 4;
+               new <<= 4;
+       }
+       n = tca->reg_file[reg] & ~mask;
+       n |= new;
+       if (tca->reg_file[reg] != n) {
+               tca->reg_file[reg] = n;
+               tca->reg_set |= 1 << reg;
+       }
+}
+
+/* Update brightness level. */
+static void set_level(struct tca6507_chip *tca, int bank, int level)
+{
+       switch (bank) {
+       case BANK0:
+       case BANK1:
+               set_code(tca, TCA6507_MAX_INTENSITY, bank, level);
+               break;
+       case MASTER:
+               set_code(tca, TCA6507_MASTER_INTENSITY, 0, level);
+               break;
+       }
+       tca->bank[bank].level = level;
+}
+
+/* Record all relevant time code for a given bank */
+static void set_times(struct tca6507_chip *tca, int bank)
+{
+       int c1, c2;
+       int result;
+
+       result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+       dev_dbg(&tca->client->dev,
+               "Chose on  times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+               c2, time_codes[c2], tca->bank[bank].ontime);
+       set_code(tca, TCA6507_FADE_ON, bank, c2);
+       set_code(tca, TCA6507_FULL_ON, bank, c1);
+       tca->bank[bank].ontime = result;
+
+       result = choose_times(tca->bank[bank].offtime, &c1, &c2);
+       dev_dbg(&tca->client->dev,
+               "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+               c2, time_codes[c2], tca->bank[bank].offtime);
+       set_code(tca, TCA6507_FADE_OFF, bank, c2);
+       set_code(tca, TCA6507_FIRST_OFF, bank, c1);
+       set_code(tca, TCA6507_SECOND_OFF, bank, c1);
+       tca->bank[bank].offtime = result;
+
+       set_code(tca, TCA6507_INITIALIZE, bank, INIT_CODE);
+}
+
+/* Write all needed registers of tca6507 */
+
+static void tca6507_work(struct work_struct *work)
+{
+       struct tca6507_chip *tca = container_of(work, struct tca6507_chip,
+                                               work);
+       struct i2c_client *cl = tca->client;
+       int set;
+       u8 file[TCA6507_REG_CNT];
+       int r;
+
+       spin_lock_irq(&tca->lock);
+       set = tca->reg_set;
+       memcpy(file, tca->reg_file, TCA6507_REG_CNT);
+       tca->reg_set = 0;
+       spin_unlock_irq(&tca->lock);
+
+       for (r = 0; r < TCA6507_REG_CNT; r++)
+               if (set & (1<<r))
+                       i2c_smbus_write_byte_data(cl, r, file[r]);
+}
+
+static void led_release(struct tca6507_led *led)
+{
+       /* If led owns any resource, release it. */
+       struct tca6507_chip *tca = led->chip;
+       if (led->bank >= 0) {
+               struct bank *b = tca->bank + led->bank;
+               if (led->blink)
+                       b->time_use--;
+               b->level_use--;
+       }
+       led->blink = 0;
+       led->bank = -1;
+}
+
+static int led_prepare(struct tca6507_led *led)
+{
+       /* Assign this led to a bank, configuring that bank if necessary. */
+       int level = TO_LEVEL(led->led_cdev.brightness);
+       struct tca6507_chip *tca = led->chip;
+       int c1, c2;
+       int i;
+       struct bank *b;
+       int need_init = 0;
+
+       led->led_cdev.brightness = TO_BRIGHT(level);
+       if (level == 0) {
+               set_select(tca, led->num, TCA6507_LS_LED_OFF);
+               return 0;
+       }
+
+       if (led->ontime == 0 || led->offtime == 0) {
+               /*
+                * Just set the brightness, choosing first usable bank.
+                * If none perfect, choose best.
+                * Count backwards so we check MASTER bank first
+                * to avoid wasting a timer.
+                */
+               int best = -1;/* full-on */
+               int diff = 15-level;
+
+               if (level == 15) {
+                       set_select(tca, led->num, TCA6507_LS_LED_ON);
+                       return 0;
+               }
+
+               for (i = MASTER; i >= BANK0; i--) {
+                       int d;
+                       if (tca->bank[i].level == level ||
+                           tca->bank[i].level_use == 0) {
+                               best = i;
+                               break;
+                       }
+                       d = abs(level - tca->bank[i].level);
+                       if (d < diff) {
+                               diff = d;
+                               best = i;
+                       }
+               }
+               if (best == -1) {
+                       /* Best brightness is full-on */
+                       set_select(tca, led->num, TCA6507_LS_LED_ON);
+                       led->led_cdev.brightness = LED_FULL;
+                       return 0;
+               }
+
+               if (!tca->bank[best].level_use)
+                       set_level(tca, best, level);
+
+               tca->bank[best].level_use++;
+               led->bank = best;
+               set_select(tca, led->num, bank_source[best]);
+               led->led_cdev.brightness = TO_BRIGHT(tca->bank[best].level);
+               return 0;
+       }
+
+       /*
+        * We have on/off time so we need to try to allocate a timing bank.
+        * First check if times are compatible with hardware and give up if
+        * not.
+        */
+       if (choose_times(led->ontime, &c1, &c2) < 0)
+               return -EINVAL;
+       if (choose_times(led->offtime, &c1, &c2) < 0)
+               return -EINVAL;
+
+       for (i = BANK0; i <= BANK1; i++) {
+               if (tca->bank[i].level_use == 0)
+                       /* not in use - it is ours! */
+                       break;
+               if (tca->bank[i].level != level)
+                       /* Incompatible level - skip */
+                       /* FIX: if timer matches we maybe should consider
+                        * this anyway...
+                        */
+                       continue;
+
+               if (tca->bank[i].time_use == 0)
+                       /* Timer not in use, and level matches - use it */
+                       break;
+
+               if (!(tca->bank[i].on_dflt ||
+                     led->on_dflt ||
+                     tca->bank[i].ontime == led->ontime))
+                       /* on time is incompatible */
+                       continue;
+
+               if (!(tca->bank[i].off_dflt ||
+                     led->off_dflt ||
+                     tca->bank[i].offtime == led->offtime))
+                       /* off time is incompatible */
+                       continue;
+
+               /* looks like a suitable match */
+               break;
+       }
+
+       if (i > BANK1)
+               /* Nothing matches - how sad */
+               return -EINVAL;
+
+       b = &tca->bank[i];
+       if (b->level_use == 0)
+               set_level(tca, i, level);
+       b->level_use++;
+       led->bank = i;
+
+       if (b->on_dflt ||
+           !led->on_dflt ||
+           b->time_use == 0) {
+               b->ontime = led->ontime;
+               b->on_dflt = led->on_dflt;
+               need_init = 1;
+       }
+
+       if (b->off_dflt ||
+           !led->off_dflt ||
+           b->time_use == 0) {
+               b->offtime = led->offtime;
+               b->off_dflt = led->off_dflt;
+               need_init = 1;
+       }
+
+       if (need_init)
+               set_times(tca, i);
+
+       led->ontime = b->ontime;
+       led->offtime = b->offtime;
+
+       b->time_use++;
+       led->blink = 1;
+       led->led_cdev.brightness = TO_BRIGHT(b->level);
+       set_select(tca, led->num, blink_source[i]);
+       return 0;
+}
+
+static int led_assign(struct tca6507_led *led)
+{
+       struct tca6507_chip *tca = led->chip;
+       int err;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tca->lock, flags);
+       led_release(led);
+       err = led_prepare(led);
+       if (err) {
+               /*
+                * Can only fail on timer setup.  In that case we need to
+                * re-establish as steady level.
+                */
+               led->ontime = 0;
+               led->offtime = 0;
+               led_prepare(led);
+       }
+       spin_unlock_irqrestore(&tca->lock, flags);
+
+       if (tca->reg_set)
+               schedule_work(&tca->work);
+       return err;
+}
+
+static void tca6507_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
+{
+       struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+                                              led_cdev);
+       led->led_cdev.brightness = brightness;
+       led->ontime = 0;
+       led->offtime = 0;
+       led_assign(led);
+}
+
+static int tca6507_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on,
+                            unsigned long *delay_off)
+{
+       struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+                                              led_cdev);
+
+       if (*delay_on == 0)
+               led->on_dflt = 1;
+       else if (delay_on != &led_cdev->blink_delay_on)
+               led->on_dflt = 0;
+       led->ontime = *delay_on;
+
+       if (*delay_off == 0)
+               led->off_dflt = 1;
+       else if (delay_off != &led_cdev->blink_delay_off)
+               led->off_dflt = 0;
+       led->offtime = *delay_off;
+
+       if (led->ontime == 0)
+               led->ontime = 512;
+       if (led->offtime == 0)
+               led->offtime = 512;
+
+       if (led->led_cdev.brightness == LED_OFF)
+               led->led_cdev.brightness = LED_FULL;
+       if (led_assign(led) < 0) {
+               led->ontime = 0;
+               led->offtime = 0;
+               led->led_cdev.brightness = LED_OFF;
+               return -EINVAL;
+       }
+       *delay_on = led->ontime;
+       *delay_off = led->offtime;
+       return 0;
+}
+
+#ifdef CONFIG_GPIOLIB
+static void tca6507_gpio_set_value(struct gpio_chip *gc,
+                                  unsigned offset, int val)
+{
+       struct tca6507_chip *tca = container_of(gc, struct tca6507_chip, gpio);
+       unsigned long flags;
+
+       spin_lock_irqsave(&tca->lock, flags);
+       /*
+        * 'OFF' is floating high, and 'ON' is pulled down, so it has the
+        * inverse sense of 'val'.
+        */
+       set_select(tca, tca->gpio_map[offset],
+                  val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
+       spin_unlock_irqrestore(&tca->lock, flags);
+       if (tca->reg_set)
+               schedule_work(&tca->work);
+}
+
+static int tca6507_gpio_direction_output(struct gpio_chip *gc,
+                                         unsigned offset, int val)
+{
+       tca6507_gpio_set_value(gc, offset, val);
+       return 0;
+}
+
+static int tca6507_probe_gpios(struct i2c_client *client,
+                              struct tca6507_chip *tca,
+                              struct tca6507_platform_data *pdata)
+{
+       int err;
+       int i = 0;
+       int gpios = 0;
+
+       for (i = 0; i < NUM_LEDS; i++)
+               if (pdata->leds.leds[i].name && pdata->leds.leds[i].flags) {
+                       /* Configure as a gpio */
+                       tca->gpio_name[gpios] = pdata->leds.leds[i].name;
+                       tca->gpio_map[gpios] = i;
+                       gpios++;
+               }
+
+       if (!gpios)
+               return 0;
+
+       tca->gpio.label = "gpio-tca6507";
+       tca->gpio.names = tca->gpio_name;
+       tca->gpio.ngpio = gpios;
+       tca->gpio.base = pdata->gpio_base;
+       tca->gpio.owner = THIS_MODULE;
+       tca->gpio.direction_output = tca6507_gpio_direction_output;
+       tca->gpio.set = tca6507_gpio_set_value;
+       tca->gpio.dev = &client->dev;
+       err = gpiochip_add(&tca->gpio);
+       if (err) {
+               tca->gpio.ngpio = 0;
+               return err;
+       }
+       if (pdata->setup)
+               pdata->setup(tca->gpio.base, tca->gpio.ngpio);
+       return 0;
+}
+
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+       if (tca->gpio.ngpio) {
+               int err = gpiochip_remove(&tca->gpio);
+               dev_err(&tca->client->dev, "%s failed, %d\n",
+                       "gpiochip_remove()", err);
+       }
+}
+#else /* CONFIG_GPIOLIB */
+static int tca6507_probe_gpios(struct i2c_client *client,
+                              struct tca6507_chip *tca,
+                              struct tca6507_platform_data *pdata)
+{
+       return 0;
+}
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+}
+#endif /* CONFIG_GPIOLIB */
+
+static int __devinit tca6507_probe(struct i2c_client *client,
+                                  const struct i2c_device_id *id)
+{
+       struct tca6507_chip *tca;
+       struct i2c_adapter *adapter;
+       struct tca6507_platform_data *pdata;
+       int err;
+       int i = 0;
+
+       adapter = to_i2c_adapter(client->dev.parent);
+       pdata = client->dev.platform_data;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+               return -EIO;
+
+       if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
+               dev_err(&client->dev, "Need %d entries in platform-data list\n",
+                       NUM_LEDS);
+               return -ENODEV;
+       }
+       err = -ENOMEM;
+       tca = kzalloc(sizeof(*tca), GFP_KERNEL);
+       if (!tca)
+               goto exit;
+
+       tca->client = client;
+       INIT_WORK(&tca->work, tca6507_work);
+       spin_lock_init(&tca->lock);
+       i2c_set_clientdata(client, tca);
+
+       for (i = 0; i < NUM_LEDS; i++) {
+               struct tca6507_led *l = tca->leds + i;
+
+               l->chip = tca;
+               l->num = i;
+               if (pdata->leds.leds[i].name && !pdata->leds.leds[i].flags) {
+                       l->led_cdev.name = pdata->leds.leds[i].name;
+                       l->led_cdev.default_trigger
+                               = pdata->leds.leds[i].default_trigger;
+                       l->led_cdev.brightness_set = tca6507_brightness_set;
+                       l->led_cdev.blink_set = tca6507_blink_set;
+                       l->bank = -1;
+                       err = led_classdev_register(&client->dev,
+                                                   &l->led_cdev);
+                       if (err < 0)
+                               goto exit;
+               }
+       }
+       err = tca6507_probe_gpios(client, tca, pdata);
+       if (err)
+               goto exit;
+       /* set all registers to known state - zero */
+       tca->reg_set = 0x7f;
+       schedule_work(&tca->work);
+
+       return 0;
+exit:
+       while (i--)
+               if (tca->leds[i].led_cdev.name)
+                       led_classdev_unregister(&tca->leds[i].led_cdev);
+       cancel_work_sync(&tca->work);
+       i2c_set_clientdata(client, NULL);
+       kfree(tca);
+       return err;
+}
+
+static int __devexit tca6507_remove(struct i2c_client *client)
+{
+       int i;
+       struct tca6507_chip *tca = i2c_get_clientdata(client);
+       struct tca6507_led *tca_leds = tca->leds;
+
+       for (i = 0; i < NUM_LEDS; i++) {
+               if (tca_leds[i].led_cdev.name)
+                       led_classdev_unregister(&tca_leds[i].led_cdev);
+       }
+       tca6507_remove_gpio(tca);
+       cancel_work_sync(&tca->work);
+       i2c_set_clientdata(client, NULL);
+       kfree(tca);
+
+       return 0;
+}
+
+static struct i2c_driver tca6507_driver = {
+       .driver   = {
+               .name    = "leds-tca6507",
+               .owner   = THIS_MODULE,
+       },
+       .probe    = tca6507_probe,
+       .remove   = __devexit_p(tca6507_remove),
+       .id_table = tca6507_id,
+};
+
+static int __init tca6507_leds_init(void)
+{
+       return i2c_add_driver(&tca6507_driver);
+}
+
+static void __exit tca6507_leds_exit(void)
+{
+       i2c_del_driver(&tca6507_driver);
+}
+
+module_init(tca6507_leds_init);
+module_exit(tca6507_leds_exit);
+
+MODULE_AUTHOR("NeilBrown <neilb@suse.de>");
+MODULE_DESCRIPTION("TCA6507 LED/GPO driver");
+MODULE_LICENSE("GPL v2");
index b1eb34c3e81f41d5eafdd00ed112373a5f25c970..74a24cf897c386952f47bdb0a9df319808f2d5a6 100644 (file)
@@ -237,7 +237,8 @@ static int wm831x_status_probe(struct platform_device *pdev)
                goto err;
        }
 
-       drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
+       drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
+                              GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;
        dev_set_drvdata(&pdev->dev, drvdata);
@@ -300,7 +301,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
 
 err_led:
        led_classdev_unregister(&drvdata->cdev);
-       kfree(drvdata);
 err:
        return ret;
 }
@@ -311,7 +311,6 @@ static int wm831x_status_remove(struct platform_device *pdev)
 
        device_remove_file(drvdata->cdev.dev, &dev_attr_src);
        led_classdev_unregister(&drvdata->cdev);
-       kfree(drvdata);
 
        return 0;
 }
@@ -325,17 +324,7 @@ static struct platform_driver wm831x_status_driver = {
        .remove = wm831x_status_remove,
 };
 
-static int __devinit wm831x_status_init(void)
-{
-       return platform_driver_register(&wm831x_status_driver);
-}
-module_init(wm831x_status_init);
-
-static void wm831x_status_exit(void)
-{
-       platform_driver_unregister(&wm831x_status_driver);
-}
-module_exit(wm831x_status_exit);
+module_platform_driver(wm831x_status_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("WM831x status LED driver");
index 4a1276578352283544ceb6fda2ff176d0772d887..918d4baff1c7257c65276bfcf4cb05b73350553c 100644 (file)
@@ -227,7 +227,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
                goto err_isink;
        }
 
-       led = kzalloc(sizeof(*led), GFP_KERNEL);
+       led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
        if (led == NULL) {
                ret = -ENOMEM;
                goto err_dcdc;
@@ -259,12 +259,10 @@ static int wm8350_led_probe(struct platform_device *pdev)
 
        ret = led_classdev_register(&pdev->dev, &led->cdev);
        if (ret < 0)
-               goto err_led;
+               goto err_dcdc;
 
        return 0;
 
- err_led:
-       kfree(led);
  err_dcdc:
        regulator_put(dcdc);
  err_isink:
@@ -281,7 +279,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
        wm8350_led_disable(led);
        regulator_put(led->dcdc);
        regulator_put(led->isink);
-       kfree(led);
        return 0;
 }
 
@@ -295,17 +292,7 @@ static struct platform_driver wm8350_led_driver = {
        .shutdown = wm8350_led_shutdown,
 };
 
-static int __devinit wm8350_led_init(void)
-{
-       return platform_driver_register(&wm8350_led_driver);
-}
-module_init(wm8350_led_init);
-
-static void wm8350_led_exit(void)
-{
-       platform_driver_unregister(&wm8350_led_driver);
-}
-module_exit(wm8350_led_exit);
+module_platform_driver(wm8350_led_driver);
 
 MODULE_AUTHOR("Mark Brown");
 MODULE_DESCRIPTION("WM8350 LED driver");
index b818cacf420f89e4932ea02b18bf3ad3539ab829..d4c48ef227fb9379ed34ee4d09976cd86836658b 100644 (file)
 #include "isph3a.h"
 #include "isphist.h"
 
-/*
- * this is provided as an interim solution until omap3isp doesn't need
- * any omap-specific iommu API
- */
-#define to_iommu(dev)                                                  \
-       (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
-
 static unsigned int autoidle;
 module_param(autoidle, int, 0444);
 MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -1114,8 +1107,7 @@ isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
 static void isp_save_ctx(struct isp_device *isp)
 {
        isp_save_context(isp, isp_reg_list);
-       if (isp->iommu)
-               omap_iommu_save_ctx(isp->iommu);
+       omap_iommu_save_ctx(isp->dev);
 }
 
 /*
@@ -1128,8 +1120,7 @@ static void isp_save_ctx(struct isp_device *isp)
 static void isp_restore_ctx(struct isp_device *isp)
 {
        isp_restore_context(isp, isp_reg_list);
-       if (isp->iommu)
-               omap_iommu_restore_ctx(isp->iommu);
+       omap_iommu_restore_ctx(isp->dev);
        omap3isp_ccdc_restore_context(isp);
        omap3isp_preview_restore_context(isp);
 }
@@ -1983,7 +1974,7 @@ static int isp_remove(struct platform_device *pdev)
        isp_cleanup_modules(isp);
 
        omap3isp_get(isp);
-       iommu_detach_device(isp->domain, isp->iommu_dev);
+       iommu_detach_device(isp->domain, &pdev->dev);
        iommu_domain_free(isp->domain);
        omap3isp_put(isp);
 
@@ -2131,17 +2122,6 @@ static int isp_probe(struct platform_device *pdev)
                }
        }
 
-       /* IOMMU */
-       isp->iommu_dev = omap_find_iommu_device("isp");
-       if (!isp->iommu_dev) {
-               dev_err(isp->dev, "omap_find_iommu_device failed\n");
-               ret = -ENODEV;
-               goto error_isp;
-       }
-
-       /* to be removed once iommu migration is complete */
-       isp->iommu = to_iommu(isp->iommu_dev);
-
        isp->domain = iommu_domain_alloc(pdev->dev.bus);
        if (!isp->domain) {
                dev_err(isp->dev, "can't alloc iommu domain\n");
@@ -2149,7 +2129,7 @@ static int isp_probe(struct platform_device *pdev)
                goto error_isp;
        }
 
-       ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+       ret = iommu_attach_device(isp->domain, &pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
                goto free_domain;
@@ -2188,7 +2168,7 @@ error_modules:
 error_irq:
        free_irq(isp->irq_num, isp);
 detach_dev:
-       iommu_detach_device(isp->domain, isp->iommu_dev);
+       iommu_detach_device(isp->domain, &pdev->dev);
 free_domain:
        iommu_domain_free(isp->domain);
 error_isp:
index 705946ef4d6027ca6f2e966149868e7a80b64d0e..d96603eb0d17a7c1ca6d4f363b4c8f32089d7c03 100644 (file)
@@ -212,9 +212,7 @@ struct isp_device {
        unsigned int sbl_resources;
        unsigned int subclk_resources;
 
-       struct omap_iommu *iommu;
        struct iommu_domain *domain;
-       struct device *iommu_dev;
 
        struct isp_platform_callback platform_cb;
 };
index 54a4a3f22e2e4187c5651aaa42cf73dcf9141059..d341ba12593f0bc0a93fc1f66dc57a899b9aa1a7 100644 (file)
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
                dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
                             req->iovm->sgt->nents, DMA_TO_DEVICE);
        if (req->table)
-               omap_iommu_vfree(isp->domain, isp->iommu, req->table);
+               omap_iommu_vfree(isp->domain, isp->dev, req->table);
        kfree(req);
 }
 
@@ -438,7 +438,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 
                req->enable = 1;
 
-               req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+               req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
                                        req->config.size, IOMMU_FLAG);
                if (IS_ERR_VALUE(req->table)) {
                        req->table = 0;
@@ -446,7 +446,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
                        goto done;
                }
 
-               req->iovm = omap_find_iovm_area(isp->iommu, req->table);
+               req->iovm = omap_find_iovm_area(isp->dev, req->table);
                if (req->iovm == NULL) {
                        ret = -ENOMEM;
                        goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
                dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
                                    req->iovm->sgt->nents, DMA_TO_DEVICE);
 
-               table = omap_da_to_va(isp->iommu, req->table);
+               table = omap_da_to_va(isp->dev, req->table);
                if (copy_from_user(table, config->lsc, req->config.size)) {
                        ret = -EFAULT;
                        goto done;
@@ -734,15 +734,15 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
                         * already done by omap_iommu_vmalloc().
                         */
                        size = ccdc->fpc.fpnum * 4;
-                       table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+                       table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
                                                        0, size, IOMMU_FLAG);
                        if (IS_ERR_VALUE(table_new))
                                return -ENOMEM;
 
-                       if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
+                       if (copy_from_user(omap_da_to_va(isp->dev, table_new),
                                           (__force void __user *)
                                           ccdc->fpc.fpcaddr, size)) {
-                               omap_iommu_vfree(isp->domain, isp->iommu,
+                               omap_iommu_vfree(isp->domain, isp->dev,
                                                                table_new);
                                return -EFAULT;
                        }
@@ -753,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
 
                ccdc_configure_fpc(ccdc);
                if (table_old != 0)
-                       omap_iommu_vfree(isp->domain, isp->iommu, table_old);
+                       omap_iommu_vfree(isp->domain, isp->dev, table_old);
        }
 
        return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -2309,7 +2309,7 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
        ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
 
        if (ccdc->fpc.fpcaddr != 0)
-               omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+               omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
 
        mutex_destroy(&ccdc->ioctl_lock);
 }
index bc0b2c7349b97894d62c6fcdb876eff8613634f7..11871ecc6d25c7257d6f960c97d805146e4bbd94 100644 (file)
@@ -366,7 +366,7 @@ static void isp_stat_bufs_free(struct ispstat *stat)
                                dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
                                             buf->iovm->sgt->nents,
                                             DMA_FROM_DEVICE);
-                       omap_iommu_vfree(isp->domain, isp->iommu,
+                       omap_iommu_vfree(isp->domain, isp->dev,
                                                        buf->iommu_addr);
                } else {
                        if (!buf->virt_addr)
@@ -400,7 +400,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
                struct iovm_struct *iovm;
 
                WARN_ON(buf->dma_addr);
-               buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+               buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
                                                        size, IOMMU_FLAG);
                if (IS_ERR((void *)buf->iommu_addr)) {
                        dev_err(stat->isp->dev,
@@ -410,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
                        return -ENOMEM;
                }
 
-               iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
+               iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
                if (!iovm ||
                    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
                                DMA_FROM_DEVICE)) {
@@ -419,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
                }
                buf->iovm = iovm;
 
-               buf->virt_addr = omap_da_to_va(stat->isp->iommu,
+               buf->virt_addr = omap_da_to_va(stat->isp->dev,
                                          (u32)buf->iommu_addr);
                buf->empty = 1;
                dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
index f2290578448c416bb809abe4d11d4146da6ead91..bd3aebafafa08c29502c5624623b3cf3c1830b74 100644 (file)
@@ -453,7 +453,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
        sgt->nents = sglen;
        sgt->orig_nents = sglen;
 
-       da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
+       da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
        if (IS_ERR_VALUE(da))
                kfree(sgt);
 
@@ -469,7 +469,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
 {
        struct sg_table *sgt;
 
-       sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
+       sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
        kfree(sgt);
 }
 
index 22027e7946f7bf16c4cba748a065a9b92a78ed51..d9bcfba6b049c2970af1bddb035d8e24c4990f6c 100644 (file)
@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
 #define MPI_MANUFACTPAGE_DEVID_SAS1066E             (0x005A)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068              (0x0054)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068E             (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP      (0x0059)
 #define MPI_MANUFACTPAGE_DEVID_SAS1078              (0x0062)
 
 
index e9c6a6047a00cfcf5c28306226e62ef148a35531..a7dc4672d996c5e67bb112371e92bfd634f2d1c7 100644 (file)
@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
 MODULE_PARM_DESC(mpt_fwfault_debug,
                 "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
 
-static char    MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char    MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+                               [MPT_MAX_CALLBACKNAME_LEN+1];
 
 #ifdef MFCNT
 static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
                        MptDriverClass[cb_idx] = dclass;
                        MptEvHandlers[cb_idx] = NULL;
                        last_drv_idx = cb_idx;
-                       memcpy(MptCallbacksName[cb_idx], func_name,
-                           strlen(func_name) > 50 ? 50 : strlen(func_name));
+                       strlcpy(MptCallbacksName[cb_idx], func_name,
+                               MPT_MAX_CALLBACKNAME_LEN+1);
                        break;
                }
        }
index b4d24dc081ae5de35c3d4d0c3765ddf63235b517..76c05bc24cb7ab27b9787bc50cc043681077a8da 100644 (file)
@@ -89,6 +89,7 @@
  */
 #define MPT_MAX_ADAPTERS               18
 #define MPT_MAX_PROTOCOL_DRIVERS       16
+#define MPT_MAX_CALLBACKNAME_LEN       49
 #define MPT_MAX_BUS                    1       /* Do not change */
 #define MPT_MAX_FC_DEVICES             255
 #define MPT_MAX_SCSI_DEVICES           16
index 9d9504298549d147133e107a1d66c1a74440c3eb..551262e4b96e0829bc4f64c15357f9862f006c76 100644 (file)
@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
                PCI_ANY_ID, PCI_ANY_ID },
        { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
                PCI_ANY_ID, PCI_ANY_ID },
+       { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+               PCI_ANY_ID, PCI_ANY_ID },
        {0}     /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
index 318a869286ab09581e761a391e57539faa7b5c77..1be621841400f9c2221f433e0f5b13e02619cc40 100644 (file)
@@ -140,6 +140,14 @@ config MTD_AR7_PARTS
        ---help---
          TI AR7 partitioning support
 
+config MTD_BCM63XX_PARTS
+       tristate "BCM63XX CFE partitioning support"
+       depends on BCM63XX
+       select CRC32
+       help
+         This provides partition parsing for BCM63xx devices with CFE
+         bootloaders.
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
index 9aaac3ac89f3f55e64adb4d72f1e94c8c3967e16..f90135429dc7568e6446d9ac350837ed2ba648fc 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)    += afs.o
 obj-$(CONFIG_MTD_AR7_PARTS)    += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS)        += bcm63xxpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR)         += mtdchar.o
index 89a02f6f65dc1ae4c2752e6d941602c6a7295709..5a3942bf109cd9ccded20d42fc4ee62b36a3ad8e 100644 (file)
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
        size_t sz;
        int ret;
 
-       ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+       ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
        if (ret >= 0 && sz != sizeof(fs))
                ret = -EINVAL;
 
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
        int ret, i;
 
        memset(iis, 0, sizeof(*iis));
-       ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+       ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
        if (ret < 0)
                goto failed;
 
index f40ea4547554d93def59fb55f1aac233fb12528f..945393129952d1a0021d65d53eb7374751c30bda 100644 (file)
@@ -73,8 +73,8 @@ static int create_mtd_partitions(struct mtd_info *master,
 
        do { /* Try 10 blocks starting from master->erasesize */
                offset = pre_size;
-               master->read(master, offset,
-                            sizeof(header), &len, (uint8_t *)&header);
+               mtd_read(master, offset, sizeof(header), &len,
+                        (uint8_t *)&header);
                if (!strncmp((char *)&header, "TIENV0.8", 8))
                        ar7_parts[1].offset = pre_size;
                if (header.checksum == LOADER_MAGIC1)
@@ -95,16 +95,16 @@ static int create_mtd_partitions(struct mtd_info *master,
        case LOADER_MAGIC1:
                while (header.length) {
                        offset += sizeof(header) + header.length;
-                       master->read(master, offset, sizeof(header),
-                                    &len, (uint8_t *)&header);
+                       mtd_read(master, offset, sizeof(header), &len,
+                                (uint8_t *)&header);
                }
                root_offset = offset + sizeof(header) + 4;
                break;
        case LOADER_MAGIC2:
                while (header.length) {
                        offset += sizeof(header) + header.length;
-                       master->read(master, offset, sizeof(header),
-                                    &len, (uint8_t *)&header);
+                       mtd_read(master, offset, sizeof(header), &len,
+                                (uint8_t *)&header);
                }
                root_offset = offset + sizeof(header) + 4 + 0xff;
                root_offset &= ~(uint32_t)0xff;
@@ -114,8 +114,7 @@ static int create_mtd_partitions(struct mtd_info *master,
                break;
        }
 
-       master->read(master, root_offset,
-               sizeof(header), &len, (u8 *)&header);
+       mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
        if (header.checksum != SQUASHFS_MAGIC) {
                root_offset += master->erasesize - 1;
                root_offset &= ~(master->erasesize - 1);
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644 (file)
index 0000000..608321e
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008  Florian Fainelli <florian@openwrt.org>
+ *                       Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE  0xBFC00000      /* Extended flash address */
+
+#define BCM63XX_MIN_CFE_SIZE   0x10000         /* always at least 64KiB */
+#define BCM63XX_MIN_NVRAM_SIZE 0x10000         /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+       char buf[9];
+       int ret;
+       size_t retlen;
+
+       ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+                      (void *)buf);
+       buf[retlen] = 0;
+
+       if (ret)
+               return ret;
+
+       if (strncmp("cfe-v", buf, 5) == 0)
+               return 0;
+
+       /* very old CFE's do not have the cfe-v string, so check for magic */
+       ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+                      (void *)buf);
+       buf[retlen] = 0;
+
+       return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+                                       struct mtd_partition **pparts,
+                                       struct mtd_part_parser_data *data)
+{
+       /* CFE, NVRAM and global Linux are always present */
+       int nrparts = 3, curpart = 0;
+       struct bcm_tag *buf;
+       struct mtd_partition *parts;
+       int ret;
+       size_t retlen;
+       unsigned int rootfsaddr, kerneladdr, spareaddr;
+       unsigned int rootfslen, kernellen, sparelen, totallen;
+       unsigned int cfelen, nvramlen;
+       int namelen = 0;
+       int i;
+       u32 computed_crc;
+
+       if (bcm63xx_detect_cfe(master))
+               return -EINVAL;
+
+       cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
+       nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+
+       /* Allocate memory for buffer */
+       buf = vmalloc(sizeof(struct bcm_tag));
+       if (!buf)
+               return -ENOMEM;
+
+       /* Get the tag */
+       ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+                      (void *)buf);
+
+       if (retlen != sizeof(struct bcm_tag)) {
+               vfree(buf);
+               return -EIO;
+       }
+
+       computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+                               offsetof(struct bcm_tag, header_crc));
+       if (computed_crc == buf->header_crc) {
+               char *boardid = &(buf->board_id[0]);
+               char *tagversion = &(buf->tag_version[0]);
+
+               sscanf(buf->kernel_address, "%u", &kerneladdr);
+               sscanf(buf->kernel_length, "%u", &kernellen);
+               sscanf(buf->total_length, "%u", &totallen);
+
+               pr_info("CFE boot tag found with version %s and board type %s\n",
+                       tagversion, boardid);
+
+               kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+               rootfsaddr = kerneladdr + kernellen;
+               spareaddr = roundup(totallen, master->erasesize) + cfelen;
+               sparelen = master->size - spareaddr - nvramlen;
+               rootfslen = spareaddr - rootfsaddr;
+       } else {
+               pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+                       buf->header_crc, computed_crc);
+               kernellen = 0;
+               rootfslen = 0;
+               rootfsaddr = 0;
+               spareaddr = cfelen;
+               sparelen = master->size - cfelen - nvramlen;
+       }
+
+       /* Determine number of partitions */
+       namelen = 8;
+       if (rootfslen > 0) {
+               nrparts++;
+               namelen += 6;
+       }
+       if (kernellen > 0) {
+               nrparts++;
+               namelen += 6;
+       }
+
+       /* Ask kernel for more memory */
+       parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+       if (!parts) {
+               vfree(buf);
+               return -ENOMEM;
+       }
+
+       /* Start building partition list */
+       parts[curpart].name = "CFE";
+       parts[curpart].offset = 0;
+       parts[curpart].size = cfelen;
+       curpart++;
+
+       if (kernellen > 0) {
+               parts[curpart].name = "kernel";
+               parts[curpart].offset = kerneladdr;
+               parts[curpart].size = kernellen;
+               curpart++;
+       }
+
+       if (rootfslen > 0) {
+               parts[curpart].name = "rootfs";
+               parts[curpart].offset = rootfsaddr;
+               parts[curpart].size = rootfslen;
+               if (sparelen > 0)
+                       parts[curpart].size += sparelen;
+               curpart++;
+       }
+
+       parts[curpart].name = "nvram";
+       parts[curpart].offset = master->size - nvramlen;
+       parts[curpart].size = nvramlen;
+
+       /* Global partition "linux" to make easy firmware upgrade */
+       curpart++;
+       parts[curpart].name = "linux";
+       parts[curpart].offset = cfelen;
+       parts[curpart].size = master->size - cfelen - nvramlen;
+
+       for (i = 0; i < nrparts; i++)
+               pr_info("Partition %d is %s offset %lx and length %lx\n", i,
+                       parts[i].name, (long unsigned int)(parts[i].offset),
+                       (long unsigned int)(parts[i].size));
+
+       pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+               sparelen);
+
+       *pparts = parts;
+       vfree(buf);
+
+       return nrparts;
+};
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+       .owner = THIS_MODULE,
+       .parse_fn = bcm63xx_parse_cfe_partitions,
+       .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+       return register_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+       deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
index 179814a95f3ac73bf0cfd6110a201f651a8c0647..85e80180b65b5131201d974e6946fdc658c2463b 100644 (file)
@@ -139,8 +139,9 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
                }
 
                /* Do some byteswapping if necessary */
-               extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
-               extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+               extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+               extp->BlkStatusRegMask = cfi32_to_cpu(map,
+                                               extp->BlkStatusRegMask);
 
 #ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
@@ -698,7 +699,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
                                continue;
                        }
                        memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
-                       ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+                       ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+                                       buffer);
                        totlen += thislen;
                        if (ret || thislen != ECCBUF_SIZE)
                                goto write_error;
@@ -707,7 +709,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
                        to += ECCBUF_SIZE;
                }
                if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
-                       ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+                       ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+                                       &thislen, elem_base);
                        totlen += thislen;
                        if (ret || thislen != ECCBUF_DIV(elem_len))
                                goto write_error;
@@ -721,7 +724,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
        }
        if (buflen) { /* flush last page, even if not full */
                /* This is sometimes intended behaviour, really */
-               ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+               ret = mtd_write(mtd, to, buflen, &thislen, buffer);
                totlen += thislen;
                if (ret || thislen != ECCBUF_SIZE)
                        goto write_error;
index 283d887f78251168f40255fe80b788f6fba0b0fc..37b05c3f2792a2bbc73521c52f4e69f8b45991cb 100644 (file)
@@ -191,6 +191,7 @@ comment "Disk-On-Chip Device Drivers"
 
 config MTD_DOC2000
        tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+       depends on MTD_NAND
        select MTD_DOCPROBE
        select MTD_NAND_IDS
        ---help---
@@ -213,6 +214,7 @@ config MTD_DOC2000
 
 config MTD_DOC2001
        tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+       depends on MTD_NAND
        select MTD_DOCPROBE
        select MTD_NAND_IDS
        ---help---
@@ -234,6 +236,7 @@ config MTD_DOC2001
 
 config MTD_DOC2001PLUS
        tristate "M-Systems Disk-On-Chip Millennium Plus"
+       depends on MTD_NAND
        select MTD_DOCPROBE
        select MTD_NAND_IDS
        ---help---
@@ -251,6 +254,8 @@ config MTD_DOC2001PLUS
 
 config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
+       select BCH
+       select BCH_CONST_PARAMS
        ---help---
          This provides an MTD device driver for the M-Systems DiskOnChip
          G3 devices.
@@ -259,6 +264,13 @@ config MTD_DOCG3
          M-Systems and now Sandisk. The support is very experimental,
          and doesn't give access to any write operations.
 
+if MTD_DOCG3
+config BCH_CONST_M
+       default 14
+config BCH_CONST_T
+       default 4
+endif
+
 config MTD_DOCPROBE
        tristate
        select MTD_DOCECC
index ebeabc727f70319b35ca2c1ab80b4aeee283c081..e7e46d1e74631b6f31cbde50a0590077d8ee0da6 100644 (file)
@@ -287,7 +287,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
        dev->mtd.flags = MTD_CAP_RAM;
        dev->mtd.erase = block2mtd_erase;
        dev->mtd.write = block2mtd_write;
-       dev->mtd.writev = default_mtd_writev;
+       dev->mtd.writev = mtd_writev;
        dev->mtd.sync = block2mtd_sync;
        dev->mtd.read = block2mtd_read;
        dev->mtd.priv = dev;
index e9fad915121924e636c32480b7b699bd6f76379f..b1cdf647901960fd88ef2bce4a787c21ace8a03d 100644 (file)
@@ -562,23 +562,14 @@ void DoC2k_init(struct mtd_info *mtd)
 
        mtd->type = MTD_NANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
-       mtd->size = 0;
-       mtd->erasesize = 0;
        mtd->writesize = 512;
        mtd->oobsize = 16;
        mtd->owner = THIS_MODULE;
        mtd->erase = doc_erase;
-       mtd->point = NULL;
-       mtd->unpoint = NULL;
        mtd->read = doc_read;
        mtd->write = doc_write;
        mtd->read_oob = doc_read_oob;
        mtd->write_oob = doc_write_oob;
-       mtd->sync = NULL;
-
-       this->totlen = 0;
-       this->numchips = 0;
-
        this->curfloor = -1;
        this->curchip = -1;
        mutex_init(&this->lock);
index a3f7a27499be86a711b4c57018b8e8b958671969..7543b98f46c46c1524a5f5aeec26b4c567e07d49 100644 (file)
@@ -343,25 +343,17 @@ void DoCMil_init(struct mtd_info *mtd)
 
        mtd->type = MTD_NANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
-       mtd->size = 0;
 
        /* FIXME: erase size is not always 8KiB */
        mtd->erasesize = 0x2000;
-
        mtd->writesize = 512;
        mtd->oobsize = 16;
        mtd->owner = THIS_MODULE;
        mtd->erase = doc_erase;
-       mtd->point = NULL;
-       mtd->unpoint = NULL;
        mtd->read = doc_read;
        mtd->write = doc_write;
        mtd->read_oob = doc_read_oob;
        mtd->write_oob = doc_write_oob;
-       mtd->sync = NULL;
-
-       this->totlen = 0;
-       this->numchips = 0;
        this->curfloor = -1;
        this->curchip = -1;
 
index 99351bc3e0ed32207ecf294ff03bad1b322ea74c..177510d0e7ee0c35a23450e6f66b7fdc944c5a6c 100644 (file)
@@ -467,23 +467,14 @@ void DoCMilPlus_init(struct mtd_info *mtd)
 
        mtd->type = MTD_NANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
-       mtd->size = 0;
-
-       mtd->erasesize = 0;
        mtd->writesize = 512;
        mtd->oobsize = 16;
        mtd->owner = THIS_MODULE;
        mtd->erase = doc_erase;
-       mtd->point = NULL;
-       mtd->unpoint = NULL;
        mtd->read = doc_read;
        mtd->write = doc_write;
        mtd->read_oob = doc_read_oob;
        mtd->write_oob = doc_write_oob;
-       mtd->sync = NULL;
-
-       this->totlen = 0;
-       this->numchips = 0;
        this->curfloor = -1;
        this->curchip = -1;
 
index bdcf5df982e829d99e45a97f263a68d478cf657a..ad11ef0a81f401dc0f2b4e2440f5d27af10e4708 100644 (file)
@@ -29,6 +29,9 @@
 #include <linux/delay.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
  *
  * As no specification is available from M-Systems/Sandisk, this drivers lacks
  * several functions available on the chip, as :
- *  - block erase
- *  - page write
  *  - IPL write
- *  - ECC fixing (lack of BCH algorith understanding)
- *  - powerdown / powerup
  *
  * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
  * the driver assumes a 16bits data bus.
@@ -53,8 +52,7 @@
  * DocG3 relies on 2 ECC algorithms, which are handled in hardware :
  *  - a 1 byte Hamming code stored in the OOB for each page
  *  - a 7 bytes BCH code stored in the OOB for each page
- * The BCH part is only used for check purpose, no correction is available as
- * some information is missing. What is known is that :
+ * The BCH ECC is :
  *  - BCH is in GF(2^14)
  *  - BCH is over data of 520 bytes (512 page + 7 page_info bytes
  *                                   + 1 hamming byte)
  *
  */
 
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+                "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15)
+ * @oobavail: 8 available bytes remaining after ECC toll
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+       .eccbytes = 8,
+       .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+       .oobfree = {{0, 7}, {15, 1} },
+       .oobavail = 8,
+};
+
+/**
+ * struct docg3_bch - BCH engine
+ */
+static struct bch_control *docg3_bch;
+
 static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
 {
        u8 val = readb(docg3->base + reg);
@@ -82,7 +104,7 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
 static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
 {
        writeb(val, docg3->base + reg);
-       trace_docg3_io(1, 16, reg, val);
+       trace_docg3_io(1, 8, reg, val);
 }
 
 static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
@@ -143,7 +165,7 @@ static void doc_delay(struct docg3 *docg3, int nbNOPs)
 {
        int i;
 
-       doc_dbg("NOP x %d\n", nbNOPs);
+       doc_vdbg("NOP x %d\n", nbNOPs);
        for (i = 0; i < nbNOPs; i++)
                doc_writeb(docg3, 0, DOC_NOP);
 }
@@ -196,8 +218,8 @@ static int doc_reset_seq(struct docg3 *docg3)
 /**
  * doc_read_data_area - Read data from data area
  * @docg3: the device
- * @buf: the buffer to fill in
- * @len: the lenght to read
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
  * @first: first time read, DOC_READADDRESS should be set
  *
  * Reads bytes from flash data. Handles the single byte / even bytes reads.
@@ -218,8 +240,10 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
        dst16 = buf;
        for (i = 0; i < len4; i += 2) {
                data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
-               *dst16 = data16;
-               dst16++;
+               if (dst16) {
+                       *dst16 = data16;
+                       dst16++;
+               }
        }
 
        if (cdr) {
@@ -229,26 +253,84 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
                dst8 = (u8 *)dst16;
                for (i = 0; i < cdr; i++) {
                        data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
-                       *dst8 = data8;
-                       dst8++;
+                       if (dst8) {
+                               *dst8 = data8;
+                               dst8++;
+                       }
                }
        }
 }
 
 /**
- * doc_set_data_mode - Sets the flash to reliable data mode
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+       int i, cdr, len4;
+       u16 *src16;
+       u8 *src8;
+
+       doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+       cdr = len & 0x3;
+       len4 = len - cdr;
+
+       doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+       src16 = (u16 *)buf;
+       for (i = 0; i < len4; i += 2) {
+               doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+               src16++;
+       }
+
+       src8 = (u8 *)src16;
+       for (i = 0; i < cdr; i++) {
+               doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+                          DOC_READADDRESS);
+               doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+               src8++;
+       }
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
  * @docg3: the device
  *
  * The reliable data mode is a bit slower than the fast mode, but less errors
  * occur.  Entering the reliable mode cannot be done without entering the fast
  * mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical and between bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page is _clearing_ bits of that
+ * page).
  */
 static void doc_set_reliable_mode(struct docg3 *docg3)
 {
-       doc_dbg("doc_set_reliable_mode()\n");
-       doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
-       doc_flash_command(docg3, DOC_CMD_FAST_MODE);
-       doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+       static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+       doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+       switch (docg3->reliable) {
+       case 0:
+               break;
+       case 1:
+               doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+               doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+               break;
+       case 2:
+               doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+               doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+               doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+               break;
+       default:
+               doc_err("doc_set_reliable_mode(): invalid mode\n");
+               break;
+       }
        doc_delay(docg3, 2);
 }
 
@@ -324,6 +406,37 @@ static int doc_set_extra_page_mode(struct docg3 *docg3)
                return 0;
 }
 
+/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+       doc_delay(docg3, 1);
+       doc_flash_address(docg3, sector & 0xff);
+       doc_flash_address(docg3, (sector >> 8) & 0xff);
+       doc_flash_address(docg3, (sector >> 16) & 0xff);
+       doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+       ofs = ofs >> 2;
+       doc_delay(docg3, 1);
+       doc_flash_address(docg3, ofs & 0xff);
+       doc_flash_address(docg3, sector & 0xff);
+       doc_flash_address(docg3, (sector >> 8) & 0xff);
+       doc_flash_address(docg3, (sector >> 16) & 0xff);
+       doc_delay(docg3, 1);
+}
+
 /**
  * doc_seek - Set both flash planes to the specified block, page for reading
  * @docg3: the device
@@ -360,34 +473,80 @@ static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
        if (ret)
                goto out;
 
-       sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
        doc_flash_sequence(docg3, DOC_SEQ_READ);
+       sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
        doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
-       doc_delay(docg3, 1);
-       doc_flash_address(docg3, sector & 0xff);
-       doc_flash_address(docg3, (sector >> 8) & 0xff);
-       doc_flash_address(docg3, (sector >> 16) & 0xff);
-       doc_delay(docg3, 1);
+       doc_setup_addr_sector(docg3, sector);
 
        sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
        doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+       doc_setup_addr_sector(docg3, sector);
        doc_delay(docg3, 1);
-       doc_flash_address(docg3, sector & 0xff);
-       doc_flash_address(docg3, (sector >> 8) & 0xff);
-       doc_flash_address(docg3, (sector >> 16) & 0xff);
+
+out:
+       return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+                        int ofs)
+{
+       int ret = 0, sector;
+
+       doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+               block0, block1, page, ofs);
+
+       doc_set_reliable_mode(docg3);
+
+       if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+               doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+               doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+               doc_delay(docg3, 2);
+       } else {
+               doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+               doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+               doc_delay(docg3, 2);
+       }
+
+       doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+       doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+       sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+       doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+       doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
        doc_delay(docg3, 2);
+       ret = doc_wait_ready(docg3);
+       if (ret)
+               goto out;
+
+       doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+       sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+       doc_setup_writeaddr_sector(docg3, sector, ofs);
+       doc_delay(docg3, 1);
 
 out:
        return ret;
 }
 
+
 /**
  * doc_read_page_ecc_init - Initialize hardware ECC engine
  * @docg3: the device
  * @len: the number of bytes covered by the ECC (BCH covered)
  *
  * The function does initialize the hardware ECC engine to compute the Hamming
- * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes).
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
  *
  * Return 0 if succeeded, -EIO on error
  */
@@ -402,6 +561,106 @@ static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
        return doc_wait_ready(docg3);
 }
 
+/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function does initialize the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+       doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+                  | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+                  | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+                  DOC_ECCCONF0);
+       doc_delay(docg3, 4);
+       doc_register_readb(docg3, DOC_FLASHCONTROL);
+       return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (as when
+ * reading OOB only or write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+       doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+       doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+       u8 ecc_conf1;
+
+       ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+       ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+       ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+       doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix the data read from flash, if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ *         It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ *         area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf.  As the docg3
+ * understands the (data, ecc, syndromes) in an inverted order in comparison to
+ * the BCH library, the function reverses the order of bits (ie. bit7 and bit0,
+ * bit6 and bit 1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch
+ * algorithm is used to decode this.  However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result.  Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+       u8 ecc[DOC_ECC_BCH_SIZE];
+       int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+       for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+               ecc[i] = bitrev8(hwecc[i]);
+       numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+                            NULL, ecc, NULL, errorpos);
+       BUG_ON(numerrs == -EINVAL);
+       if (numerrs < 0)
+               goto out;
+
+       for (i = 0; i < numerrs; i++)
+               errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+       for (i = 0; i < numerrs; i++)
+               if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+                       /* error is located in data, correct it */
+                       change_bit(errorpos[i], buf);
+out:
+       doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+       return numerrs;
+}
+
+
 /**
  * doc_read_page_prepare - Prepares reading data from a flash page
  * @docg3: the device
@@ -488,16 +747,40 @@ static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
 }
 
 /**
- * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+                                   const u_char *buf)
+{
+       doc_write_data_area(docg3, buf, len);
+       doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
  * @docg3: the device
- * @syns:  the array of 7 integers where the syndroms will be stored
+ * @hwecc:  the array of 7 integers where the hardware ecc will be stored
  */
-static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
 {
        int i;
 
        for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
-               syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+               hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+       doc_writeb(docg3, 0, DOC_DATAEND);
+       doc_delay(docg3, 2);
 }
 
 /**
@@ -510,8 +793,7 @@ static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
  */
 static void doc_read_page_finish(struct docg3 *docg3)
 {
-       doc_writeb(docg3, 0, DOC_DATAEND);
-       doc_delay(docg3, 2);
+       doc_page_finish(docg3);
        doc_set_device_id(docg3, 0);
 }
 
@@ -523,18 +805,29 @@ static void doc_read_page_finish(struct docg3 *docg3)
  * @block1: second plane block index calculated
  * @page: page calculated
  * @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
  */
 static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
-                             int *ofs)
+                             int *ofs, int reliable)
 {
-       uint sector;
+       uint sector, pages_biblock;
+
+       pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+       if (reliable == 1 || reliable == 2)
+               pages_biblock /= 2;
 
        sector = from / DOC_LAYOUT_PAGE_SIZE;
-       *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
-               * DOC_LAYOUT_NBPLANES;
+       *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
        *block1 = *block0 + 1;
-       *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+       *page = sector % pages_biblock;
        *page /= DOC_LAYOUT_NBPLANES;
+       if (reliable == 1 || reliable == 2)
+               *page *= 2;
        if (sector % 2)
                *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
        else
@@ -542,99 +835,124 @@ static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
 }
 
 /**
- * doc_read - Read bytes from flash
+ * doc_read_oob - Read out of band bytes from flash
  * @mtd: the device
  * @from: the offset from first block and first page, in bytes, aligned on page
  *        size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
+ * @ops: the mtd oob structure
  *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
+ * Reads flash memory OOB area of pages.
  *
  * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
  */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-            size_t *retlen, u_char *buf)
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+                       struct mtd_oob_ops *ops)
 {
        struct docg3 *docg3 = mtd->priv;
-       int block0, block1, page, readlen, ret, ofs = 0;
-       int syn[DOC_ECC_BCH_SIZE], eccconf1;
-       u8 oob[DOC_LAYOUT_OOB_SIZE];
+       int block0, block1, page, ret, ofs = 0;
+       u8 *oobbuf = ops->oobbuf;
+       u8 *buf = ops->datbuf;
+       size_t len, ooblen, nbdata, nboob;
+       u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+
+       if (buf)
+               len = ops->len;
+       else
+               len = 0;
+       if (oobbuf)
+               ooblen = ops->ooblen;
+       else
+               ooblen = 0;
+
+       if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+               oobbuf += ops->ooboffs;
+
+       doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+               from, ops->mode, buf, len, oobbuf, ooblen);
+       if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
+           (from % DOC_LAYOUT_PAGE_SIZE))
+               return -EINVAL;
 
        ret = -EINVAL;
-       doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
-       if (from % DOC_LAYOUT_PAGE_SIZE)
-               goto err;
-       if (len % 4)
-               goto err;
-       calc_block_sector(from, &block0, &block1, &page, &ofs);
+       calc_block_sector(from + len, &block0, &block1, &page, &ofs,
+                         docg3->reliable);
        if (block1 > docg3->max_block)
                goto err;
 
-       *retlen = 0;
+       ops->oobretlen = 0;
+       ops->retlen = 0;
        ret = 0;
-       readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
-       while (!ret && len > 0) {
-               readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+       while (!ret && (len > 0 || ooblen > 0)) {
+               calc_block_sector(from, &block0, &block1, &page, &ofs,
+                       docg3->reliable);
+               nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+               nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
                ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
                if (ret < 0)
                        goto err;
-               ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+               ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
                if (ret < 0)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
-               if (ret < readlen)
+               ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+               if (ret < nbdata)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
-                                            oob, 0);
-               if (ret < DOC_LAYOUT_OOB_SIZE)
+               doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+                                      NULL, 0);
+               ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+               if (ret < nboob)
                        goto err_in_read;
+               doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+                                      NULL, 0);
 
-               *retlen += readlen;
-               buf += readlen;
-               len -= readlen;
-
-               ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
-               if (ofs == 0)
-                       page += 2;
-               if (page > DOC_ADDR_PAGE_MASK) {
-                       page = 0;
-                       block0 += 2;
-                       block1 += 2;
-               }
-
-               /*
-                * There should be a BCH bitstream fixing algorithm here ...
-                * By now, a page read failure is triggered by BCH error
-                */
-               doc_get_hw_bch_syndroms(docg3, syn);
+               doc_get_bch_hw_ecc(docg3, hwecc);
                eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
 
-               doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-                        oob[0], oob[1], oob[2], oob[3], oob[4],
-                        oob[5], oob[6]);
-               doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
-               doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-                        oob[8], oob[9], oob[10], oob[11], oob[12],
-                        oob[13], oob[14]);
-               doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+               if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+                       doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+                               oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
+                               oobbuf[4], oobbuf[5], oobbuf[6]);
+                       doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+                       doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+                               oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
+                               oobbuf[12], oobbuf[13], oobbuf[14]);
+                       doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+               }
                doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
-               doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-                       syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
-
-               ret = -EBADMSG;
-               if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
-                       if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
-                               goto err_in_read;
-                       if (is_prot_seq_error(docg3))
-                               goto err_in_read;
+               doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+                       hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
+                       hwecc[5], hwecc[6]);
+
+               ret = -EIO;
+               if (is_prot_seq_error(docg3))
+                       goto err_in_read;
+               ret = 0;
+               if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+                   (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+                   (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+                   (ops->mode != MTD_OPS_RAW) &&
+                   (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+                       ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+                       if (ret < 0) {
+                               mtd->ecc_stats.failed++;
+                               ret = -EBADMSG;
+                       }
+                       if (ret > 0) {
+                               mtd->ecc_stats.corrected += ret;
+                               ret = -EUCLEAN;
+                       }
                }
+
                doc_read_page_finish(docg3);
+               ops->retlen += nbdata;
+               ops->oobretlen += nboob;
+               buf += nbdata;
+               oobbuf += nboob;
+               len -= nbdata;
+               ooblen -= nboob;
+               from += DOC_LAYOUT_PAGE_SIZE;
        }
 
-       return 0;
+       return ret;
 err_in_read:
        doc_read_page_finish(docg3);
 err:
@@ -642,54 +960,33 @@ err:
 }
 
 /**
- * doc_read_oob - Read out of band bytes from flash
+ * doc_read - Read bytes from flash
  * @mtd: the device
  * @from: the offset from first block and first page, in bytes, aligned on page
  *        size
- * @ops: the mtd oob structure
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
  *
- * Reads flash memory OOB area of pages.
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
  *
  * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
  */
-static int doc_read_oob(struct mtd_info *mtd, loff_t from,
-                       struct mtd_oob_ops *ops)
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+            size_t *retlen, u_char *buf)
 {
-       struct docg3 *docg3 = mtd->priv;
-       int block0, block1, page, ofs, ret;
-       u8 *buf = ops->oobbuf;
-       size_t len = ops->ooblen;
-
-       doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
-       if (len != DOC_LAYOUT_OOB_SIZE)
-               return -EINVAL;
-
-       switch (ops->mode) {
-       case MTD_OPS_PLACE_OOB:
-               buf += ops->ooboffs;
-               break;
-       default:
-               break;
-       }
+       struct mtd_oob_ops ops;
+       size_t ret;
 
-       calc_block_sector(from, &block0, &block1, &page, &ofs);
-       if (block1 > docg3->max_block)
-               return -EINVAL;
-
-       ret = doc_read_page_prepare(docg3, block0, block1, page,
-                                   ofs + DOC_LAYOUT_PAGE_SIZE);
-       if (!ret)
-               ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
-       if (!ret)
-               ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
-                                            buf, 1);
-       doc_read_page_finish(docg3);
+       memset(&ops, 0, sizeof(ops));
+       ops.datbuf = buf;
+       ops.len = len;
+       ops.mode = MTD_OPS_AUTO_OOB;
 
-       if (ret > 0)
-               ops->oobretlen = ret;
-       else
-               ops->oobretlen = 0;
-       return (ret > 0) ? 0 : ret;
+       ret = doc_read_oob(mtd, from, &ops);
+       *retlen = ops.retlen;
+       return ret;
 }
 
 static int doc_reload_bbt(struct docg3 *docg3)
@@ -726,7 +1023,8 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
        struct docg3 *docg3 = mtd->priv;
        int block0, block1, page, ofs, is_good;
 
-       calc_block_sector(from, &block0, &block1, &page, &ofs);
+       calc_block_sector(from, &block0, &block1, &page, &ofs,
+               docg3->reliable);
        doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
                from, block0, block1, page, ofs);
 
@@ -739,6 +1037,7 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
        return !is_good;
 }
 
+#if 0
 /**
  * doc_get_erase_count - Get block erase count
  * @docg3: the device
@@ -758,7 +1057,7 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
        doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
        if (from % DOC_LAYOUT_PAGE_SIZE)
                return -EINVAL;
-       calc_block_sector(from, &block0, &block1, &page, &ofs);
+       calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
        if (block1 > docg3->max_block)
                return -EINVAL;
 
@@ -780,26 +1079,578 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
 
        return max(plane1_erase_count, plane2_erase_count);
 }
+#endif
 
-/*
- * Debug sysfs entries
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
  */
-static int dbg_flashctrl_show(struct seq_file *s, void *p)
+static int doc_get_op_status(struct docg3 *docg3)
 {
-       struct docg3 *docg3 = (struct docg3 *)s->private;
+       u8 status;
 
-       int pos = 0;
-       u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+       doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+       doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+       doc_delay(docg3, 5);
 
-       pos += seq_printf(s,
-                "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
-                fctrl,
-                fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
-                fctrl & DOC_CTRL_CE ? "active" : "inactive",
-                fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
-                fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
-                fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
-       return pos;
+       doc_ecc_disable(docg3);
+       doc_read_data_area(docg3, &status, 1, 1);
+       return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after erase or write operation, and check
+ * erase/write status.
+ *
+ * Returns 0 if erase/write successful, -EIO if erase/write issue, -EAGAIN if
+ * timeout
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+       int status, ret = 0;
+
+       if (!doc_is_ready(docg3))
+               usleep_range(3000, 3000);
+       if (!doc_is_ready(docg3)) {
+               doc_dbg("Timeout reached and the chip is still not ready\n");
+               ret = -EAGAIN;
+               goto out;
+       }
+
+       status = doc_get_op_status(docg3);
+       if (status & DOC_PLANES_STATUS_FAIL) {
+               doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+                       status);
+               ret = -EIO;
+       }
+
+out:
+       doc_page_finish(docg3);
+       return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if erase successful, -EIO if erase issue, -EAGAIN if chip not
+ * ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+       int ret, sector;
+
+       doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+       ret = doc_reset_seq(docg3);
+       if (ret)
+               return -EIO;
+
+       doc_set_reliable_mode(docg3);
+       doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+       sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+       doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+       doc_setup_addr_sector(docg3, sector);
+       sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+       doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+       doc_setup_addr_sector(docg3, sector);
+       doc_delay(docg3, 1);
+
+       doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+       doc_delay(docg3, 2);
+
+       if (is_prot_seq_error(docg3)) {
+               doc_err("Erase blocks %d,%d error\n", block0, block1);
+               return -EIO;
+       }
+
+       return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
+ * split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
+ * issue
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+       struct docg3 *docg3 = mtd->priv;
+       uint64_t len;
+       int block0, block1, page, ret, ofs = 0;
+
+       doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+       doc_set_device_id(docg3, docg3->device_id);
+
+       info->state = MTD_ERASE_PENDING;
+       calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+                         &ofs, docg3->reliable);
+       ret = -EINVAL;
+       if (block1 > docg3->max_block || page || ofs)
+               goto reset_err;
+
+       ret = 0;
+       calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+                         docg3->reliable);
+       doc_set_reliable_mode(docg3);
+       for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+               info->state = MTD_ERASING;
+               ret = doc_erase_block(docg3, block0, block1);
+               block0 += 2;
+               block1 += 2;
+       }
+
+       if (ret)
+               goto reset_err;
+
+       info->state = MTD_ERASE_DONE;
+       return 0;
+
+reset_err:
+       info->state = MTD_ERASE_FAILED;
+       return ret;
+}
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ *      size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ *       written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ *           BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ *           remaining ones are filled with hardware Hamming and BCH
+ *           computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+                         const u_char *oob, int autoecc)
+{
+       int block0, block1, page, ret, ofs = 0;
+       u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+       doc_dbg("doc_write_page(to=%lld)\n", to);
+       calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+       doc_set_device_id(docg3, docg3->device_id);
+       ret = doc_reset_seq(docg3);
+       if (ret)
+               goto err;
+
+       /* Program the flash address block and page */
+       ret = doc_write_seek(docg3, block0, block1, page, ofs);
+       if (ret)
+               goto err;
+
+       doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+       doc_delay(docg3, 2);
+       doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+       if (oob && autoecc) {
+               doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+               doc_delay(docg3, 2);
+               oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+               hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+               doc_delay(docg3, 2);
+               doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+                                       &hamming);
+               doc_delay(docg3, 2);
+
+               doc_get_bch_hw_ecc(docg3, hwecc);
+               doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+               doc_delay(docg3, 2);
+
+               doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+       }
+       if (oob && !autoecc)
+               doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+       doc_delay(docg3, 2);
+       doc_page_finish(docg3);
+       doc_delay(docg3, 2);
+       doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+       doc_delay(docg3, 2);
+
+       /*
+        * The wait status will perform another doc_page_finish() call, but that
+        * seems to please the docg3, so leave it.
+        */
+       ret = doc_write_erase_wait_status(docg3);
+       return ret;
+err:
+       doc_read_page_finish(docg3);
+       return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mbd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+       int autoecc;
+
+       switch (ops->mode) {
+       case MTD_OPS_PLACE_OOB:
+       case MTD_OPS_AUTO_OOB:
+               autoecc = 1;
+               break;
+       case MTD_OPS_RAW:
+               autoecc = 0;
+               break;
+       default:
+               autoecc = -EINVAL;
+       }
+       return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+       memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+       dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to setup the OOB and then write(), store the OOB
+ * into a temporary storage. This is very dangerous, as 2 concurrent
+ * applications could store an OOB, and then write their pages (which will
+ * result into one having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+                         struct mtd_oob_ops *ops)
+{
+       int ooblen = ops->ooblen, autoecc;
+
+       if (ooblen != DOC_LAYOUT_OOB_SIZE)
+               return -EINVAL;
+       autoecc = doc_guess_autoecc(ops);
+       if (autoecc < 0)
+               return autoecc;
+
+       docg3->oob_write_ofs = to;
+       docg3->oob_autoecc = autoecc;
+       if (ops->mode == MTD_OPS_AUTO_OOB) {
+               doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+               ops->oobretlen = 8;
+       } else {
+               memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+               ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+       }
+       return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ *       size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for the subsequent write
+ * page. The provided OOB should be 16 bytes long. If a data buffer is provided
+ * as well, issue the page write.
+ * Or provide data without OOB, and then a all zeroed OOB will be used (ECC will
+ * still be filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if length is not 16 bytes
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+                        struct mtd_oob_ops *ops)
+{
+       struct docg3 *docg3 = mtd->priv;
+       int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+       u8 *oobbuf = ops->oobbuf;
+       u8 *buf = ops->datbuf;
+       size_t len, ooblen;
+       u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+       if (buf)
+               len = ops->len;
+       else
+               len = 0;
+       if (oobbuf)
+               ooblen = ops->ooblen;
+       else
+               ooblen = 0;
+
+       if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+               oobbuf += ops->ooboffs;
+
+       doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+               ofs, ops->mode, buf, len, oobbuf, ooblen);
+       switch (ops->mode) {
+       case MTD_OPS_PLACE_OOB:
+       case MTD_OPS_RAW:
+               oobdelta = mtd->oobsize;
+               break;
+       case MTD_OPS_AUTO_OOB:
+               oobdelta = mtd->ecclayout->oobavail;
+               break;
+       default:
+               oobdelta = 0;
+       }
+       if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+           (ofs % DOC_LAYOUT_PAGE_SIZE))
+               return -EINVAL;
+       if (len && ooblen &&
+           (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+               return -EINVAL;
+
+       ret = -EINVAL;
+       calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
+                         docg3->reliable);
+       if (block1 > docg3->max_block)
+               goto err;
+
+       ops->oobretlen = 0;
+       ops->retlen = 0;
+       ret = 0;
+       if (len == 0 && ooblen == 0)
+               return -EINVAL;
+       if (len == 0 && ooblen > 0)
+               return doc_backup_oob(docg3, ofs, ops);
+
+       autoecc = doc_guess_autoecc(ops);
+       if (autoecc < 0)
+               return autoecc;
+
+       while (!ret && len > 0) {
+               memset(oob, 0, sizeof(oob));
+               if (ofs == docg3->oob_write_ofs)
+                       memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+               else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+                       doc_fill_autooob(oob, oobbuf);
+               else if (ooblen > 0)
+                       memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+               ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+               ofs += DOC_LAYOUT_PAGE_SIZE;
+               len -= DOC_LAYOUT_PAGE_SIZE;
+               buf += DOC_LAYOUT_PAGE_SIZE;
+               if (ooblen) {
+                       oobbuf += oobdelta;
+                       ooblen -= oobdelta;
+                       ops->oobretlen += oobdelta;
+               }
+               ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+       }
+err:
+       doc_set_device_id(docg3, 0);
+       return ret;
+}
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ *      size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+                    size_t *retlen, const u_char *buf)
+{
+       struct docg3 *docg3 = mtd->priv;
+       int ret;
+       struct mtd_oob_ops ops;
+
+       doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+       ops.datbuf = (char *)buf;
+       ops.len = len;
+       ops.mode = MTD_OPS_PLACE_OOB;
+       ops.oobbuf = NULL;
+       ops.ooblen = 0;
+       ops.ooboffs = 0;
+
+       ret = doc_write_oob(mtd, to, &ops);
+       *retlen = ops.retlen;
+       return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+                                    struct device_attribute *attr)
+{
+       int floor;
+       struct platform_device *pdev = to_platform_device(dev);
+       struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+       floor = attr->attr.name[1] - '0';
+       if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+               return NULL;
+       else
+               return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+       int dps0;
+
+       doc_set_device_id(docg3, docg3->device_id);
+       dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+       doc_set_device_id(docg3, 0);
+
+       return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+       int dps1;
+
+       doc_set_device_id(docg3, docg3->device_id);
+       dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+       doc_set_device_id(docg3, 0);
+
+       return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+       int i;
+
+       if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+               return -EINVAL;
+
+       doc_set_device_id(docg3, docg3->device_id);
+       for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+               doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+       doc_set_device_id(docg3, 0);
+       return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+       int i;
+
+       if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+               return -EINVAL;
+
+       doc_set_device_id(docg3, docg3->device_id);
+       for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+               doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+       doc_set_device_id(docg3, 0);
+       return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+       __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+       __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+       __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
+       __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+       FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
+
+static int doc_register_sysfs(struct platform_device *pdev,
+                             struct mtd_info **floors)
+{
+       int ret = 0, floor, i = 0;
+       struct device *dev = &pdev->dev;
+
+       for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
+            floor++)
+               for (i = 0; !ret && i < 4; i++)
+                       ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+       if (!ret)
+               return 0;
+       do {
+               while (--i >= 0)
+                       device_remove_file(dev, &doc_sys_attrs[floor][i]);
+               i = 4;
+       } while (--floor >= 0);
+       return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+                                struct mtd_info **floors)
+{
+       struct device *dev = &pdev->dev;
+       int floor, i;
+
+       for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+            floor++)
+               for (i = 0; i < 4; i++)
+                       device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
+
+/*
+ * Debug sysfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+       struct docg3 *docg3 = (struct docg3 *)s->private;
+
+       int pos = 0;
+       u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+       pos += seq_printf(s,
+                "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+                fctrl,
+                fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+                fctrl & DOC_CTRL_CE ? "active" : "inactive",
+                fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+                fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+                fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+       return pos;
 }
 DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
 
@@ -852,13 +1703,15 @@ static int dbg_protection_show(struct seq_file *s, void *p)
 {
        struct docg3 *docg3 = (struct docg3 *)s->private;
        int pos = 0;
-       int protect = doc_register_readb(docg3, DOC_PROTECTION);
-       int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
-       int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
-       int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
-       int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
-       int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
-       int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+       int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+       protect = doc_register_readb(docg3, DOC_PROTECTION);
+       dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+       dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+       dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+       dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+       dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+       dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
 
        pos += seq_printf(s, "Protection = 0x%02x (",
                         protect);
@@ -947,52 +1800,54 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
 
        cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
        docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+       docg3->reliable = reliable_mode;
 
        switch (chip_id) {
        case DOC_CHIPID_G3:
-               mtd->name = "DiskOnChip G3";
+               mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+                                     docg3->device_id);
                docg3->max_block = 2047;
                break;
        }
        mtd->type = MTD_NANDFLASH;
-       /*
-        * Once write methods are added, the correct flags will be set.
-        * mtd->flags = MTD_CAP_NANDFLASH;
-        */
-       mtd->flags = MTD_CAP_ROM;
+       mtd->flags = MTD_CAP_NANDFLASH;
        mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+       if (docg3->reliable == 2)
+               mtd->size /= 2;
        mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+       if (docg3->reliable == 2)
+               mtd->erasesize /= 2;
        mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
        mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
        mtd->owner = THIS_MODULE;
-       mtd->erase = NULL;
-       mtd->point = NULL;
-       mtd->unpoint = NULL;
+       mtd->erase = doc_erase;
        mtd->read = doc_read;
-       mtd->write = NULL;
+       mtd->write = doc_write;
        mtd->read_oob = doc_read_oob;
-       mtd->write_oob = NULL;
-       mtd->sync = NULL;
+       mtd->write_oob = doc_write_oob;
        mtd->block_isbad = doc_block_isbad;
+       mtd->ecclayout = &docg3_oobinfo;
 }
 
 /**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
- * @pdev: platform device
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
  *
- * Probes for a G3 chip at the specified IO space in the platform data
- * ressources.
+ * Checks whether a device is available at the specified IO range and floor.
  *
- * Returns 0 on success, -ENOMEM, -ENXIO on error
+ * Returns a mtd_info struct if there is a device, NULL if none found, an
+ * ERR_PTR(-ENOMEM) if a memory allocation failed. If floor 0 is checked, a
+ * reset of the ASIC is launched.
  */
-static int __init docg3_probe(struct platform_device *pdev)
+static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
+                                        struct device *dev)
 {
-       struct device *dev = &pdev->dev;
-       struct docg3 *docg3;
-       struct mtd_info *mtd;
-       struct resource *ress;
        int ret, bbt_nbpages;
        u16 chip_id, chip_id_inv;
+       struct docg3 *docg3;
+       struct mtd_info *mtd;
 
        ret = -ENOMEM;
        docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
@@ -1002,69 +1857,218 @@ static int __init docg3_probe(struct platform_device *pdev)
        if (!mtd)
                goto nomem2;
        mtd->priv = docg3;
+       bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+                                  8 * DOC_LAYOUT_PAGE_SIZE);
+       docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+       if (!docg3->bbt)
+               goto nomem3;
 
-       ret = -ENXIO;
-       ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!ress) {
-               dev_err(dev, "No I/O memory resource defined\n");
-               goto noress;
-       }
-       docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
-
-       docg3->dev = &pdev->dev;
-       docg3->device_id = 0;
+       docg3->dev = dev;
+       docg3->device_id = floor;
+       docg3->base = base;
        doc_set_device_id(docg3, docg3->device_id);
-       doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+       if (!floor)
+               doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
        doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
 
        chip_id = doc_register_readw(docg3, DOC_CHIPID);
        chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
 
-       ret = -ENODEV;
+       ret = 0;
        if (chip_id != (u16)(~chip_id_inv)) {
-               doc_info("No device found at IO addr %p\n",
-                        (void *)ress->start);
-               goto nochipfound;
+               goto nomem3;
        }
 
        switch (chip_id) {
        case DOC_CHIPID_G3:
-               doc_info("Found a G3 DiskOnChip at addr %p\n",
-                        (void *)ress->start);
+               doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+                        base, floor);
                break;
        default:
                doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
-               goto nochipfound;
+               goto nomem3;
        }
 
        doc_set_driver_info(chip_id, mtd);
-       platform_set_drvdata(pdev, mtd);
 
-       ret = -ENOMEM;
-       bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
-                                  8 * DOC_LAYOUT_PAGE_SIZE);
-       docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
-       if (!docg3->bbt)
-               goto nochipfound;
+       doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
        doc_reload_bbt(docg3);
+       return mtd;
 
-       ret = mtd_device_parse_register(mtd, part_probes,
-                                       NULL, NULL, 0);
-       if (ret)
-               goto register_error;
+nomem3:
+       kfree(mtd);
+nomem2:
+       kfree(docg3);
+nomem1:
+       return ERR_PTR(ret);
+}
 
-       doc_dbg_register(docg3);
-       return 0;
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+       struct docg3 *docg3 = mtd->priv;
 
-register_error:
+       mtd_device_unregister(mtd);
        kfree(docg3->bbt);
-nochipfound:
-       iounmap(docg3->base);
-noress:
+       kfree(docg3);
+       kfree(mtd->name);
        kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens docg3 floor
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+       int i;
+       struct mtd_info **docg3_floors, *mtd;
+       struct docg3 *docg3;
+
+       docg3_floors = platform_get_drvdata(pdev);
+       mtd = docg3_floors[0];
+       docg3 = mtd->priv;
+
+       doc_dbg("docg3_resume()\n");
+       for (i = 0; i < 12; i++)
+               doc_readb(docg3, DOC_IOSPACE_IPL);
+       return 0;
+}
+
+/**
+ * docg3_suspend - Put in low power mode the docg3 floor
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if suspend succeeded, -EIO if chip refused suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       int floor, i;
+       struct mtd_info **docg3_floors, *mtd;
+       struct docg3 *docg3;
+       u8 ctrl, pwr_down;
+
+       docg3_floors = platform_get_drvdata(pdev);
+       for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+               mtd = docg3_floors[floor];
+               if (!mtd)
+                       continue;
+               docg3 = mtd->priv;
+
+               doc_writeb(docg3, floor, DOC_DEVICESELECT);
+               ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+               ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+               doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+               for (i = 0; i < 10; i++) {
+                       usleep_range(3000, 4000);
+                       pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+                       if (pwr_down & DOC_POWERDOWN_READY)
+                               break;
+               }
+               if (pwr_down & DOC_POWERDOWN_READY) {
+                       doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+                               floor);
+               } else {
+                       doc_err("docg3_suspend(): floor %d powerdown failed\n",
+                               floor);
+                       return -EIO;
+               }
+       }
+
+       mtd = docg3_floors[0];
+       docg3 = mtd->priv;
+       doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+       return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. The floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtd_info *mtd;
+       struct resource *ress;
+       void __iomem *base;
+       int ret, floor, found = 0;
+       struct mtd_info **docg3_floors;
+
+       ret = -ENXIO;
+       ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ress) {
+               dev_err(dev, "No I/O memory resource defined\n");
+               goto noress;
+       }
+       base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+       ret = -ENOMEM;
+       docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
+                              GFP_KERNEL);
+       if (!docg3_floors)
+               goto nomem1;
+       docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+                            DOC_ECC_BCH_PRIMPOLY);
+       if (!docg3_bch)
+               goto nomem2;
+
+       for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+               mtd = doc_probe_device(base, floor, dev);
+               if (IS_ERR(mtd)) {
+                       ret = PTR_ERR(mtd);
+                       goto err_probe;
+               }
+               if (!mtd) {
+                       if (floor == 0)
+                               goto notfound;
+                       else
+                               continue;
+               }
+               docg3_floors[floor] = mtd;
+               ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+                                               0);
+               if (ret)
+                       goto err_probe;
+               found++;
+       }
+
+       ret = doc_register_sysfs(pdev, docg3_floors);
+       if (ret)
+               goto err_probe;
+       if (!found)
+               goto notfound;
+
+       platform_set_drvdata(pdev, docg3_floors);
+       doc_dbg_register(docg3_floors[0]->priv);
+       return 0;
+
+notfound:
+       ret = -ENODEV;
+       dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+       free_bch(docg3_bch);
+       for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+               if (docg3_floors[floor])
+                       doc_release_device(docg3_floors[floor]);
 nomem2:
-       kfree(docg3);
+       kfree(docg3_floors);
 nomem1:
+       iounmap(base);
+noress:
        return ret;
 }
 
@@ -1076,15 +2080,20 @@ nomem1:
  */
 static int __exit docg3_release(struct platform_device *pdev)
 {
-       struct mtd_info *mtd = platform_get_drvdata(pdev);
-       struct docg3 *docg3 = mtd->priv;
+       struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+       struct docg3 *docg3 = docg3_floors[0]->priv;
+       void __iomem *base = docg3->base;
+       int floor;
 
+       doc_unregister_sysfs(pdev, docg3_floors);
        doc_dbg_unregister(docg3);
-       mtd_device_unregister(mtd);
-       iounmap(docg3->base);
-       kfree(docg3->bbt);
-       kfree(docg3);
-       kfree(mtd);
+       for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+               if (docg3_floors[floor])
+                       doc_release_device(docg3_floors[floor]);
+
+       kfree(docg3_floors);
+       free_bch(docg3_bch);
+       iounmap(base);
        return 0;
 }
 
@@ -1093,6 +2102,8 @@ static struct platform_driver g3_driver = {
                .name   = "docg3",
                .owner  = THIS_MODULE,
        },
+       .suspend        = docg3_suspend,
+       .resume         = docg3_resume,
        .remove         = __exit_p(docg3_release),
 };
 
index 0d407be24594bab5a4cfa868d3d4a82ed01e6291..db0da436b49332dbea30971e44392a5fa9887461 100644 (file)
 #define DOC_LAYOUT_WEAR_OFFSET         (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
 #define DOC_LAYOUT_BLOCK_SIZE                                  \
        (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M                  14
+#define DOC_ECC_BCH_T                  4
+#define DOC_ECC_BCH_PRIMPOLY           0x4443
 #define DOC_ECC_BCH_SIZE               7
 #define DOC_ECC_BCH_COVERED_BYTES                              \
        (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ +    \
-        DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+        DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES                                        \
+       (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
 
 /*
  * Blocks distribution
@@ -80,6 +89,7 @@
 
 #define DOC_CHIPID_G3                  0x200
 #define DOC_ERASE_MARK                 0xaa
+#define DOC_MAX_NBFLOORS               4
 /*
  * Flash registers
  */
 #define DOC_ECCCONF1                   0x1042
 #define DOC_ECCPRESET                  0x1044
 #define DOC_HAMMINGPARITY              0x1046
-#define DOC_BCH_SYNDROM(idx)           (0x1048 + (idx << 1))
+#define DOC_BCH_HW_ECC(idx)            (0x1048 + idx)
 
 #define DOC_PROTECTION                 0x1056
+#define DOC_DPS0_KEY                   0x105c
+#define DOC_DPS1_KEY                   0x105e
 #define DOC_DPS0_ADDRLOW               0x1060
 #define DOC_DPS0_ADDRHIGH              0x1062
 #define DOC_DPS1_ADDRLOW               0x1064
 
 #define DOC_ASICMODECONFIRM            0x1072
 #define DOC_CHIPID_INV                 0x1074
+#define DOC_POWERMODE                  0x107c
 
 /*
  * Flash sequences
  */
 #define DOC_SEQ_RESET                  0x00
 #define DOC_SEQ_PAGE_SIZE_532          0x03
-#define DOC_SEQ_SET_MODE               0x09
+#define DOC_SEQ_SET_FASTMODE           0x05
+#define DOC_SEQ_SET_RELIABLEMODE       0x09
 #define DOC_SEQ_READ                   0x12
 #define DOC_SEQ_SET_PLANE1             0x0e
 #define DOC_SEQ_SET_PLANE2             0x10
 #define DOC_SEQ_PAGE_SETUP             0x1d
+#define DOC_SEQ_ERASE                  0x27
+#define DOC_SEQ_PLANES_STATUS          0x31
 
 /*
  * Flash commands
 #define DOC_CMD_PROG_BLOCK_ADDR                0x60
 #define DOC_CMD_PROG_CYCLE1            0x80
 #define DOC_CMD_PROG_CYCLE2            0x10
+#define DOC_CMD_PROG_CYCLE3            0x11
 #define DOC_CMD_ERASECYCLE2            0xd0
+#define DOC_CMD_READ_STATUS            0x70
+#define DOC_CMD_PLANES_STATUS          0x71
 
 #define DOC_CMD_RELIABLE_MODE          0x22
 #define DOC_CMD_FAST_MODE              0xa2
 /*
  * Flash register : DOC_ECCCONF0
  */
+#define DOC_ECCCONF0_WRITE_MODE                0x0000
 #define DOC_ECCCONF0_READ_MODE         0x8000
 #define DOC_ECCCONF0_AUTO_ECC_ENABLE   0x4000
 #define DOC_ECCCONF0_HAMMING_ENABLE    0x1000
  */
 #define DOC_ECCCONF1_BCH_SYNDROM_ERR   0x80
 #define DOC_ECCCONF1_UNKOWN1           0x40
-#define DOC_ECCCONF1_UNKOWN2           0x20
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN   0x20
 #define DOC_ECCCONF1_UNKOWN3           0x10
 #define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
 
 #define DOC_READADDR_ONE_BYTE          0x4000
 #define DOC_READADDR_ADDR_MASK         0x1fff
 
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY            0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL         0x01
+#define DOC_PLANES_STATUS_PLANE0_KO    0x02
+#define DOC_PLANES_STATUS_PLANE1_KO    0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH      8
+
 /**
  * struct docg3 - DiskOnChip driver private data
  * @dev: the device currently under control
  * @base: mapped IO space
  * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
  * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+
+ * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
+ *            reliable mode
+ *            Fast mode implies more errors than normal mode.
+ *            Reliable mode implies that page 2*n and 2*n+1 are clones.
  * @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next
+ *                 page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC
+ *               if 0, use all the 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
  * @debugfs_root: debugfs root node
  */
 struct docg3 {
@@ -237,8 +290,12 @@ struct docg3 {
        void __iomem *base;
        unsigned int device_id:4;
        unsigned int if_cfg:1;
+       unsigned int reliable:2;
        int max_block;
        u8 *bbt;
+       loff_t oob_write_ofs;
+       int oob_autoecc;
+       u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
        struct dentry *debugfs_root;
 };
 
index 45116bb302972ba5cbde40719778aa4cb065635e..706b847b46b30ae0d34b1ded35b9529495d9fbc2 100644 (file)
@@ -241,8 +241,7 @@ static void __init DoC_Probe(unsigned long physadr)
                        return;
                }
                docfound = 1;
-               mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
+               mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
                if (!mtd) {
                        printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
                        iounmap(docptr);
@@ -250,10 +249,6 @@ static void __init DoC_Probe(unsigned long physadr)
                }
 
                this = (struct DiskOnChip *)(&mtd[1]);
-
-               memset((char *)mtd,0, sizeof(struct mtd_info));
-               memset((char *)this, 0, sizeof(struct DiskOnChip));
-
                mtd->priv = this;
                this->virtadr = docptr;
                this->physadr = physadr;
index 884904d3f9d2deda9088046f8666e2f14b2aa664..7c60dddbefc0ba055551559c5f2046b10e37eecd 100644 (file)
@@ -992,7 +992,6 @@ static int __devexit m25p_remove(struct spi_device *spi)
 static struct spi_driver m25p80_driver = {
        .driver = {
                .name   = "m25p80",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .id_table       = m25p_ids,
index d75c7af18a638d9f94588565e2d6b3271c95b05d..236057ead0d2ab2fcd5d57bff92e18a05e927223 100644 (file)
@@ -936,7 +936,6 @@ static int __devexit dataflash_remove(struct spi_device *spi)
 static struct spi_driver dataflash_driver = {
        .driver = {
                .name           = "mtd_dataflash",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
                .of_match_table = dataflash_dt_ids,
        },
index d38ef3bffe8d0723806835df2d1093a017757380..5fc198350b94b66ea27b5e7113f4f192c61c9dfd 100644 (file)
@@ -378,7 +378,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
        struct flash_info *flash_info;
        struct sst25l_flash *flash;
        struct flash_platform_data *data;
-       int ret, i;
+       int ret;
 
        flash_info = sst25l_match_device(spi);
        if (!flash_info)
@@ -444,7 +444,6 @@ static int __devexit sst25l_remove(struct spi_device *spi)
 static struct spi_driver sst25l_driver = {
        .driver = {
                .name   = "sst25l",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe          = sst25l_probe,
index c7382bb686c679efef42dd5218cce645002512eb..19d637266fcd47026c26b6172b052f1becc23ec4 100644 (file)
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
         (offset + sizeof(header)) < max_offset;
         offset += part->mbd.mtd->erasesize ? : 0x2000) {
 
-       err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
-                             (unsigned char *)&header);
+       err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+                       (unsigned char *)&header);
 
        if (err)
            return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
     for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
        offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
                      << part->header.EraseUnitSize);
-       ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
-                             (unsigned char *)&header);
+       ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+                       (unsigned char *)&header);
 
        if (ret)
            goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
        part->EUNInfo[i].Deleted = 0;
        offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
 
-       ret = part->mbd.mtd->read(part->mbd.mtd, offset,
-                             part->BlocksPerUnit * sizeof(uint32_t), &retval,
-                             (unsigned char *)part->bam_cache);
+       ret = mtd_read(part->mbd.mtd, offset,
+                       part->BlocksPerUnit * sizeof(uint32_t), &retval,
+                       (unsigned char *)part->bam_cache);
 
        if (ret)
                goto out_bam_cache;
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
     erase->len = 1 << part->header.EraseUnitSize;
     erase->priv = (u_long)part;
 
-    ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+    ret = mtd_erase(part->mbd.mtd, erase);
 
     if (!ret)
            xfer->EraseCount++;
@@ -422,8 +422,8 @@ static int prepare_xfer(partition_t *part, int i)
     header.LogicalEUN = cpu_to_le16(0xffff);
     header.EraseCount = cpu_to_le32(xfer->EraseCount);
 
-    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
-                          &retlen, (u_char *)&header);
+    ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+                    (u_char *)&header);
 
     if (ret) {
        return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
 
     for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
 
-       ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
-                              &retlen, (u_char *)&ctl);
+       ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+                        (u_char *)&ctl);
 
        if (ret)
            return ret;
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
 
        offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
 
-       ret = part->mbd.mtd->read(part->mbd.mtd, offset,
-                             part->BlocksPerUnit * sizeof(uint32_t),
-                             &retlen, (u_char *) (part->bam_cache));
+       ret = mtd_read(part->mbd.mtd, offset,
+                       part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+                       (u_char *)(part->bam_cache));
 
        /* mark the cache bad, in case we get an error later */
        part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
     offset = xfer->Offset + 20; /* Bad! */
     unit = cpu_to_le16(0x7fff);
 
-    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
-                          &retlen, (u_char *) &unit);
+    ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+                    (u_char *)&unit);
 
     if (ret) {
        printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
            break;
        case BLOCK_DATA:
        case BLOCK_REPLACEMENT:
-           ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
-                        &retlen, (u_char *) buf);
+           ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+                           (u_char *)buf);
            if (ret) {
                printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
                return ret;
             }
 
 
-           ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
-                        &retlen, (u_char *) buf);
+           ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+                            (u_char *)buf);
            if (ret)  {
                printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
                return ret;
@@ -550,9 +550,11 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
     }
 
     /* Write the BAM to the transfer unit */
-    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
-                    part->BlocksPerUnit * sizeof(int32_t), &retlen,
-                   (u_char *)part->bam_cache);
+    ret = mtd_write(part->mbd.mtd,
+                    xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+                    part->BlocksPerUnit * sizeof(int32_t),
+                    &retlen,
+                    (u_char *)part->bam_cache);
     if (ret) {
        printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
        return ret;
@@ -560,8 +562,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
 
 
     /* All clear? Then update the LogicalEUN again */
-    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
-                          &retlen, (u_char *)&srcunitswap);
+    ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+                    &retlen, (u_char *)&srcunitswap);
 
     if (ret) {
        printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -648,8 +650,7 @@ static int reclaim_block(partition_t *part)
            if (queued) {
                pr_debug("ftl_cs: waiting for transfer "
                      "unit to be prepared...\n");
-               if (part->mbd.mtd->sync)
-                       part->mbd.mtd->sync(part->mbd.mtd);
+               mtd_sync(part->mbd.mtd);
            } else {
                static int ne = 0;
                if (++ne < 5)
@@ -747,10 +748,11 @@ static uint32_t find_free(partition_t *part)
        /* Invalidate cache */
        part->bam_index = 0xffff;
 
-       ret = part->mbd.mtd->read(part->mbd.mtd,
-                      part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
-                      part->BlocksPerUnit * sizeof(uint32_t),
-                      &retlen, (u_char *) (part->bam_cache));
+       ret = mtd_read(part->mbd.mtd,
+                       part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+                       part->BlocksPerUnit * sizeof(uint32_t),
+                       &retlen,
+                       (u_char *)(part->bam_cache));
 
        if (ret) {
            printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -810,8 +812,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
        else {
            offset = (part->EUNInfo[log_addr / bsize].Offset
                          + (log_addr % bsize));
-           ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
-                          &retlen, (u_char *) buffer);
+           ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+                           (u_char *)buffer);
 
            if (ret) {
                printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -849,8 +851,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
                  le32_to_cpu(part->header.BAMOffset));
 
 #ifdef PSYCHO_DEBUG
-    ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
-                        &retlen, (u_char *)&old_addr);
+    ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+                   (u_char *)&old_addr);
     if (ret) {
        printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
        return ret;
@@ -886,8 +888,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
 #endif
        part->bam_cache[blk] = le_virt_addr;
     }
-    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
-                            &retlen, (u_char *)&le_virt_addr);
+    ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+                    (u_char *)&le_virt_addr);
 
     if (ret) {
        printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -946,8 +948,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
        part->EUNInfo[part->bam_index].Deleted++;
        offset = (part->EUNInfo[part->bam_index].Offset +
                      blk * SECTOR_SIZE);
-       ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
-                                     buffer);
+       ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
 
        if (ret) {
            printk(KERN_NOTICE "ftl_cs: block write failed!\n");
index dd034efd18751c8aaded3779f8ba85df06110bc5..28646c95cfb845803fd983c6a83b489bf4130c41 100644 (file)
@@ -158,7 +158,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.oobbuf = buf;
        ops.datbuf = NULL;
 
-       res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+       res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
        *retlen = ops.oobretlen;
        return res;
 }
@@ -178,7 +178,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.oobbuf = buf;
        ops.datbuf = NULL;
 
-       res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+       res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
        *retlen = ops.oobretlen;
        return res;
 }
@@ -199,7 +199,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.datbuf = buf;
        ops.len = len;
 
-       res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+       res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
        *retlen = ops.retlen;
        return res;
 }
@@ -343,14 +343,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
                if (BlockMap[block] == BLOCK_NIL)
                        continue;
 
-               ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
-                               (block * SECTORSIZE), SECTORSIZE, &retlen,
-                               movebuf);
+               ret = mtd_read(mtd,
+                              (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+                              SECTORSIZE,
+                              &retlen,
+                              movebuf);
                if (ret < 0 && !mtd_is_bitflip(ret)) {
-                       ret = mtd->read(mtd,
-                                       (inftl->EraseSize * BlockMap[block]) +
-                                       (block * SECTORSIZE), SECTORSIZE,
-                                       &retlen, movebuf);
+                       ret = mtd_read(mtd,
+                                      (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+                                      SECTORSIZE,
+                                      &retlen,
+                                      movebuf);
                        if (ret != -EIO)
                                pr_debug("INFTL: error went away on retry?\n");
                }
@@ -914,7 +917,7 @@ foundit:
        } else {
                size_t retlen;
                loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
-               int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+               int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
 
                /* Handle corrected bit flips gracefully */
                if (ret < 0 && !mtd_is_bitflip(ret))
index 2ff601f816cebd12b4210b199e85fbcbdfb424cf..4adc0374fb6b5736ba23cdc77f5f8ebb2bb7117b 100644 (file)
@@ -73,8 +73,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
                 * Check for BNAND header first. Then whinge if it's found
                 * but later checks fail.
                 */
-               ret = mtd->read(mtd, block * inftl->EraseSize,
-                               SECTORSIZE, &retlen, buf);
+               ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+                              &retlen, buf);
                /* We ignore ret in case the ECC of the MediaHeader is invalid
                   (which is apparently acceptable) */
                if (retlen != SECTORSIZE) {
@@ -118,8 +118,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
                memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
 
                /* Read the spare media header at offset 4096 */
-               mtd->read(mtd, block * inftl->EraseSize + 4096,
-                         SECTORSIZE, &retlen, buf);
+               mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+                        &retlen, buf);
                if (retlen != SECTORSIZE) {
                        printk(KERN_WARNING "INFTL: Unable to read spare "
                               "Media Header\n");
@@ -220,7 +220,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
                                 */
                                instr->addr = ip->Reserved0 * inftl->EraseSize;
                                instr->len = inftl->EraseSize;
-                               mtd->erase(mtd, instr);
+                               mtd_erase(mtd, instr);
                        }
                        if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
                                printk(KERN_WARNING "INFTL: Media Header "
@@ -306,7 +306,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
                        /* If any of the physical eraseblocks are bad, don't
                           use the unit. */
                        for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
-                               if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+                               if (mtd_block_isbad(inftl->mbd.mtd,
+                                                   i * inftl->EraseSize + physblock))
                                        inftl->PUtable[i] = BLOCK_RESERVED;
                        }
                }
@@ -342,7 +343,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
        int i;
 
        for (i = 0; i < len; i += SECTORSIZE) {
-               if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+               if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
                        return -1;
                if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
                        return -1;
@@ -393,7 +394,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
           mark only the failed block in the bbt. */
        for (physblock = 0; physblock < inftl->EraseSize;
             physblock += instr->len, instr->addr += instr->len) {
-               mtd->erase(inftl->mbd.mtd, instr);
+               mtd_erase(inftl->mbd.mtd, instr);
 
                if (instr->state == MTD_ERASE_FAILED) {
                        printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -423,7 +424,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
 fail:
        /* could not format, update the bad block table (caller is responsible
           for setting the PUtable to BLOCK_RESERVED on failure) */
-       inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+       mtd_block_markbad(inftl->mbd.mtd, instr->addr);
        return -1;
 }
 
index 1dca31d9a8b3dc189c06d4236e1d19bb77810a38..536bbceaeaad559868acf633a6cd994412af3c4a 100644 (file)
@@ -70,19 +70,12 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
        mtd->erase = lpddr_erase;
        mtd->write = lpddr_write_buffers;
        mtd->writev = lpddr_writev;
-       mtd->read_oob = NULL;
-       mtd->write_oob = NULL;
-       mtd->sync = NULL;
        mtd->lock = lpddr_lock;
        mtd->unlock = lpddr_unlock;
-       mtd->suspend = NULL;
-       mtd->resume = NULL;
        if (map_is_linear(map)) {
                mtd->point = lpddr_point;
                mtd->unpoint = lpddr_unpoint;
        }
-       mtd->block_isbad = NULL;
-       mtd->block_markbad = NULL;
        mtd->size = 1 << lpddr->qinfo->DevSizeShift;
        mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
        mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
index 8e0c4bf9f7fb4e3b2a56f75b540ea8355d01c9c9..6c5c431c64af3ed6fbf97d1bb763049bf119872b 100644 (file)
@@ -242,15 +242,6 @@ config MTD_NETtel
        help
          Support for flash chips on NETtel/SecureEdge/SnapGear boards.
 
-config MTD_BCM963XX
-        tristate "Map driver for Broadcom BCM963xx boards"
-        depends on BCM63XX
-       select MTD_MAP_BANK_WIDTH_2
-       select MTD_CFI_I1
-        help
-         Support for parsing CFE image tag and creating MTD partitions on
-         Broadcom BCM63xx boards.
-
 config MTD_LANTIQ
        tristate "Lantiq SoC NOR support"
        depends on LANTIQ
index 45dcb8b14f22f0b0cb3b4ed4d7ce85d447c4da79..68a9a91d344fbc65974e5978fb6ee4d6a7ee113e 100644 (file)
@@ -55,6 +55,5 @@ obj-$(CONFIG_MTD_BFIN_ASYNC)  += bfin-async-flash.o
 obj-$(CONFIG_MTD_RBTX4939)     += rbtx4939-flash.o
 obj-$(CONFIG_MTD_VMU)          += vmu-flash.o
 obj-$(CONFIG_MTD_GPIO_ADDR)    += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX)     += bcm963xx-flash.o
 obj-$(CONFIG_MTD_LATCH_ADDR)   += latch-addr-flash.o
 obj-$(CONFIG_MTD_LANTIQ)       += lantiq-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
deleted file mode 100644 (file)
index 736ca10..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright Â© 2006-2008  Florian Fainelli <florian@openwrt.org>
- *                       Mike Albon <malbon@openwrt.org>
- * Copyright Â© 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH       2               /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE  0xBFC00000      /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
-       .name           = "bcm963xx",
-       .bankwidth      = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
-                                               struct mtd_partition **pparts)
-{
-       /* CFE, NVRAM and global Linux are always present */
-       int nrparts = 3, curpart = 0;
-       struct bcm_tag *buf;
-       struct mtd_partition *parts;
-       int ret;
-       size_t retlen;
-       unsigned int rootfsaddr, kerneladdr, spareaddr;
-       unsigned int rootfslen, kernellen, sparelen, totallen;
-       int namelen = 0;
-       int i;
-       char *boardid;
-       char *tagversion;
-
-       /* Allocate memory for buffer */
-       buf = vmalloc(sizeof(struct bcm_tag));
-       if (!buf)
-               return -ENOMEM;
-
-       /* Get the tag */
-       ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
-                                                       &retlen, (void *)buf);
-       if (retlen != sizeof(struct bcm_tag)) {
-               vfree(buf);
-               return -EIO;
-       }
-
-       sscanf(buf->kernel_address, "%u", &kerneladdr);
-       sscanf(buf->kernel_length, "%u", &kernellen);
-       sscanf(buf->total_length, "%u", &totallen);
-       tagversion = &(buf->tag_version[0]);
-       boardid = &(buf->board_id[0]);
-
-       printk(KERN_INFO PFX "CFE boot tag found with version %s "
-                               "and board type %s\n", tagversion, boardid);
-
-       kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
-       rootfsaddr = kerneladdr + kernellen;
-       spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
-       sparelen = master->size - spareaddr - master->erasesize;
-       rootfslen = spareaddr - rootfsaddr;
-
-       /* Determine number of partitions */
-       namelen = 8;
-       if (rootfslen > 0) {
-               nrparts++;
-               namelen += 6;
-       };
-       if (kernellen > 0) {
-               nrparts++;
-               namelen += 6;
-       };
-
-       /* Ask kernel for more memory */
-       parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
-       if (!parts) {
-               vfree(buf);
-               return -ENOMEM;
-       };
-
-       /* Start building partition list */
-       parts[curpart].name = "CFE";
-       parts[curpart].offset = 0;
-       parts[curpart].size = master->erasesize;
-       curpart++;
-
-       if (kernellen > 0) {
-               parts[curpart].name = "kernel";
-               parts[curpart].offset = kerneladdr;
-               parts[curpart].size = kernellen;
-               curpart++;
-       };
-
-       if (rootfslen > 0) {
-               parts[curpart].name = "rootfs";
-               parts[curpart].offset = rootfsaddr;
-               parts[curpart].size = rootfslen;
-               if (sparelen > 0)
-                       parts[curpart].size += sparelen;
-               curpart++;
-       };
-
-       parts[curpart].name = "nvram";
-       parts[curpart].offset = master->size - master->erasesize;
-       parts[curpart].size = master->erasesize;
-
-       /* Global partition "linux" to make easy firmware upgrade */
-       curpart++;
-       parts[curpart].name = "linux";
-       parts[curpart].offset = parts[0].size;
-       parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
-       for (i = 0; i < nrparts; i++)
-               printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
-                                       "length %lx\n", i, parts[i].name,
-                                       (long unsigned int)(parts[i].offset),
-                                       (long unsigned int)(parts[i].size));
-
-       printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
-                                                       spareaddr, sparelen);
-       *pparts = parts;
-       vfree(buf);
-
-       return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
-       int idoffset = 0x4e0;
-       static char idstring[8] = "CFE1CFE1";
-       char buf[9];
-       int ret;
-       size_t retlen;
-
-       ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
-       buf[retlen] = 0;
-       printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
-       return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
-       int err = 0;
-       int parsed_nr_parts = 0;
-       char *part_type;
-       struct resource *r;
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no resource supplied\n");
-               return -ENODEV;
-       }
-
-       bcm963xx_map.phys = r->start;
-       bcm963xx_map.size = resource_size(r);
-       bcm963xx_map.virt = ioremap(r->start, resource_size(r));
-       if (!bcm963xx_map.virt) {
-               dev_err(&pdev->dev, "failed to ioremap\n");
-               return -EIO;
-       }
-
-       dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
-                                       bcm963xx_map.size, bcm963xx_map.phys);
-
-       simple_map_init(&bcm963xx_map);
-
-       bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
-       if (!bcm963xx_mtd_info) {
-               dev_err(&pdev->dev, "failed to probe using CFI\n");
-               bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
-               if (bcm963xx_mtd_info)
-                       goto probe_ok;
-               dev_err(&pdev->dev, "failed to probe using JEDEC\n");
-               err = -EIO;
-               goto err_probe;
-       }
-
-probe_ok:
-       bcm963xx_mtd_info->owner = THIS_MODULE;
-
-       /* This is mutually exclusive */
-       if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
-               dev_info(&pdev->dev, "CFE bootloader detected\n");
-               if (parsed_nr_parts == 0) {
-                       int ret = parse_cfe_partitions(bcm963xx_mtd_info,
-                                                       &parsed_parts);
-                       if (ret > 0) {
-                               part_type = "CFE";
-                               parsed_nr_parts = ret;
-                       }
-               }
-       } else {
-               dev_info(&pdev->dev, "unsupported bootloader\n");
-               err = -ENODEV;
-               goto err_probe;
-       }
-
-       return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
-                                  parsed_nr_parts);
-
-err_probe:
-       iounmap(bcm963xx_map.virt);
-       return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
-       if (bcm963xx_mtd_info) {
-               mtd_device_unregister(bcm963xx_mtd_info);
-               map_destroy(bcm963xx_mtd_info);
-       }
-
-       if (bcm963xx_map.virt) {
-               iounmap(bcm963xx_map.virt);
-               bcm963xx_map.virt = 0;
-       }
-
-       return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
-       .probe  = bcm963xx_probe,
-       .remove = bcm963xx_remove,
-       .driver = {
-               .name   = "bcm963xx-flash",
-               .owner  = THIS_MODULE,
-       },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
-       return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
-       platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
index 6d6b2b5674ee6f4f27282e8356b301446a66c3eb..650126c361f18865d9e29b346fdeb8650208f9b6 100644 (file)
@@ -190,17 +190,7 @@ static struct platform_driver bfin_flash_driver = {
        },
 };
 
-static int __init bfin_flash_init(void)
-{
-       return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
-       platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
index 1ec66f031c51655bb4904c985565f47fc2fc3bfe..33cce895859f0f8959cea03f06f28d24d9567b26 100644 (file)
@@ -279,17 +279,7 @@ static struct platform_driver gpio_flash_driver = {
        },
 };
 
-static int __init gpio_flash_init(void)
-{
-       return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
-       platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
 
 MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
 MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
index 437fcd2f352f8095b658cc4a9d1ef28cc01eddf5..fc7d4d0d9a4e4c31532577705f65209ffc1dbc7b 100644 (file)
@@ -246,18 +246,8 @@ static struct platform_driver ixp2000_flash_driver = {
        },
 };
 
-static int __init ixp2000_flash_init(void)
-{
-       return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
-       platform_driver_unregister(&ixp2000_flash_driver);
-}
+module_platform_driver(ixp2000_flash_driver);
 
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
 MODULE_ALIAS("platform:IXP2000-Flash");
index 30409015a3de1d67e9d37c2ce345b0a837a28006..8b5410162d70edecfa94277f361c9e5bf28c782a 100644 (file)
@@ -270,19 +270,7 @@ static struct platform_driver ixp4xx_flash_driver = {
        },
 };
 
-static int __init ixp4xx_flash_init(void)
-{
-       return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
-       platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
index 4f10e27ada551d12f77cfd4d72b02f15ef9c5c8d..7b889de9477bcf4b0bf6c9936c38e6246e039a3a 100644 (file)
@@ -159,7 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
        if (!ltq_mtd->mtd) {
                dev_err(&pdev->dev, "probing failed\n");
                err = -ENXIO;
-               goto err_unmap;
+               goto err_free;
        }
 
        ltq_mtd->mtd->owner = THIS_MODULE;
@@ -179,8 +179,6 @@ ltq_mtd_probe(struct platform_device *pdev)
 
 err_destroy:
        map_destroy(ltq_mtd->mtd);
-err_unmap:
-       iounmap(ltq_mtd->map->virt);
 err_free:
        kfree(ltq_mtd->map);
 err_out:
@@ -198,8 +196,6 @@ ltq_mtd_remove(struct platform_device *pdev)
                        mtd_device_unregister(ltq_mtd->mtd);
                        map_destroy(ltq_mtd->mtd);
                }
-               if (ltq_mtd->map->virt)
-                       iounmap(ltq_mtd->map->virt);
                kfree(ltq_mtd->map);
                kfree(ltq_mtd);
        }
index 119baa7d74773088ea0f07ffd316b6ee2c807b7d..8fed58e3a4a808e9423f3e36b02eeb7e5b5276e2 100644 (file)
@@ -223,17 +223,7 @@ static struct platform_driver latch_addr_flash_driver = {
        },
 };
 
-static int __init latch_addr_flash_init(void)
-{
-       return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
-       platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
 
 MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
 MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
index 66e8200079c2c25633db8f662967825575b89915..abc562653b31777d4ea6d4217335d14b64ec8b15 100644 (file)
@@ -85,6 +85,7 @@ static int physmap_flash_probe(struct platform_device *dev)
        struct physmap_flash_data *physmap_data;
        struct physmap_flash_info *info;
        const char **probe_type;
+       const char **part_types;
        int err = 0;
        int i;
        int devices_found = 0;
@@ -171,7 +172,9 @@ static int physmap_flash_probe(struct platform_device *dev)
        if (err)
                goto err_out;
 
-       mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+       part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+       mtd_device_parse_register(info->cmtd, part_types, 0,
                                  physmap_data->parts, physmap_data->nr_parts);
        return 0;
 
@@ -187,9 +190,8 @@ static void physmap_flash_shutdown(struct platform_device *dev)
        int i;
 
        for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
-               if (info->mtd[i]->suspend && info->mtd[i]->resume)
-                       if (info->mtd[i]->suspend(info->mtd[i]) == 0)
-                               info->mtd[i]->resume(info->mtd[i]);
+               if (mtd_suspend(info->mtd[i]) == 0)
+                       mtd_resume(info->mtd[i]);
 }
 #else
 #define physmap_flash_shutdown NULL
index 7d65f9d3e6902e442003dfd7455953d1199c2d9a..2e6fb6831d553802400862b3ed0356e864405ab9 100644 (file)
@@ -338,18 +338,7 @@ static struct platform_driver of_flash_driver = {
        .remove         = of_flash_remove,
 };
 
-static int __init of_flash_init(void)
-{
-       return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
-       platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
index 2a25b6789af4d5633ee7fcadf104fc08e9183928..436d121185b15a230878f6091ab5041270fe6d92 100644 (file)
@@ -125,8 +125,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
 {
        struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
 
-       if (info && info->mtd->suspend(info->mtd) == 0)
-               info->mtd->resume(info->mtd);
+       if (info && mtd_suspend(info->mtd) == 0)
+               mtd_resume(info->mtd);
 }
 #else
 #define pxa2xx_flash_shutdown NULL
@@ -142,18 +142,7 @@ static struct platform_driver pxa2xx_flash_driver = {
        .shutdown       = pxa2xx_flash_shutdown,
 };
 
-static int __init init_pxa2xx_flash(void)
-{
-       return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
-       platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
index 0237f197fd1238f553732f73a2db64ddba98c0f6..3da63fc6f16eb4faf522fdaa5b230b18c70c7354 100644 (file)
@@ -119,9 +119,8 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
 {
        struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
 
-       if (info->mtd->suspend && info->mtd->resume)
-               if (info->mtd->suspend(info->mtd) == 0)
-                       info->mtd->resume(info->mtd);
+       if (mtd_suspend(info->mtd) == 0)
+               mtd_resume(info->mtd);
 }
 #else
 #define rbtx4939_flash_shutdown NULL
@@ -137,18 +136,7 @@ static struct platform_driver rbtx4939_flash_driver = {
        },
 };
 
-static int __init rbtx4939_flash_init(void)
-{
-       return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
-       platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("RBTX4939 MTD map driver");
index fa9c0a9670cd88a4e050ba65f0d70f51fee9f3d7..50282199770742eb6d78cb5701741623a3b4689a 100644 (file)
@@ -377,8 +377,8 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
 static void sa1100_mtd_shutdown(struct platform_device *dev)
 {
        struct sa_info *info = platform_get_drvdata(dev);
-       if (info && info->mtd->suspend(info->mtd) == 0)
-               info->mtd->resume(info->mtd);
+       if (info && mtd_suspend(info->mtd) == 0)
+               mtd_resume(info->mtd);
 }
 #else
 #define sa1100_mtd_shutdown NULL
@@ -394,18 +394,7 @@ static struct platform_driver sa1100_mtd_driver = {
        },
 };
 
-static int __init sa1100_mtd_init(void)
-{
-       return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
-       platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
 
 MODULE_AUTHOR("Nicolas Pitre");
 MODULE_DESCRIPTION("SA1100 CFI map driver");
index d88c8426bb0fe7a4032fbdd8d3cee879542c59d2..934a72c8007880407247915d1d814225c45d65f2 100644 (file)
@@ -204,8 +204,7 @@ scb2_flash_remove(struct pci_dev *dev)
                return;
 
        /* disable flash writes */
-       if (scb2_mtd->lock)
-               scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+       mtd_lock(scb2_mtd, 0, scb2_mtd->size);
 
        mtd_device_unregister(scb2_mtd);
        map_destroy(scb2_mtd);
index 2d66234f57cb471e3c9700b5fc35ad1e1c9b6e5f..175e537b444fc5d578412c47dbb5022f26382aec 100644 (file)
@@ -158,15 +158,4 @@ static struct platform_driver uflash_driver = {
        .remove         = __devexit_p(uflash_remove),
 };
 
-static int __init uflash_init(void)
-{
-       return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
-       platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
index ed8b5e744b12abc15b0db0189c972b3331bd1f0b..424ca5f93c6c37f74304f04a2e393523ae453f7a 100644 (file)
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
 
        mutex_lock(&dev->lock);
 
-       if (dev->open++)
+       if (dev->open)
                goto unlock;
 
        kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
                goto error_release;
 
 unlock:
+       dev->open++;
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
index 7c1dc908a17475eeb1f610deb8f36505e5b2e439..af6591237b9b51f22022e61a653fa7a5d8e29845 100644 (file)
@@ -85,7 +85,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);
 
-       ret = mtd->erase(mtd, &erase);
+       ret = mtd_erase(mtd, &erase);
        if (ret) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&wait_q, &wait);
@@ -102,7 +102,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
         * Next, write the data to flash.
         */
 
-       ret = mtd->write(mtd, pos, len, &retlen, buf);
+       ret = mtd_write(mtd, pos, len, &retlen, buf);
        if (ret)
                return ret;
        if (retlen != len)
@@ -152,7 +152,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
                mtd->name, pos, len);
 
        if (!sect_size)
-               return mtd->write(mtd, pos, len, &retlen, buf);
+               return mtd_write(mtd, pos, len, &retlen, buf);
 
        while (len > 0) {
                unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +184,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
                            mtdblk->cache_offset != sect_start) {
                                /* fill the cache with the current sector */
                                mtdblk->cache_state = STATE_EMPTY;
-                               ret = mtd->read(mtd, sect_start, sect_size,
-                                               &retlen, mtdblk->cache_data);
+                               ret = mtd_read(mtd, sect_start, sect_size,
+                                              &retlen, mtdblk->cache_data);
                                if (ret)
                                        return ret;
                                if (retlen != sect_size)
@@ -222,7 +222,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
                        mtd->name, pos, len);
 
        if (!sect_size)
-               return mtd->read(mtd, pos, len, &retlen, buf);
+               return mtd_read(mtd, pos, len, &retlen, buf);
 
        while (len > 0) {
                unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +241,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
                    mtdblk->cache_offset == sect_start) {
                        memcpy (buf, mtdblk->cache_data + offset, size);
                } else {
-                       ret = mtd->read(mtd, pos, size, &retlen, buf);
+                       ret = mtd_read(mtd, pos, size, &retlen, buf);
                        if (ret)
                                return ret;
                        if (retlen != size)
@@ -322,8 +322,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
 
        if (!--mtdblk->count) {
                /* It was the last usage. Free the cache */
-               if (mbd->mtd->sync)
-                       mbd->mtd->sync(mbd->mtd);
+               mtd_sync(mbd->mtd);
                vfree(mtdblk->cache_data);
        }
 
@@ -341,9 +340,7 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);
-
-       if (dev->mtd->sync)
-               dev->mtd->sync(dev->mtd);
+       mtd_sync(dev->mtd);
        return 0;
 }
 
index 0470a6e863096779a85b26200a133c4158d7f5a5..92759a9d2985e8a478b6a05734db195453d39434 100644 (file)
@@ -30,7 +30,7 @@ static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
 {
        size_t retlen;
 
-       if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+       if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
                return 1;
        return 0;
 }
@@ -40,7 +40,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
 {
        size_t retlen;
 
-       if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+       if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
                return 1;
        return 0;
 }
index e7dc732ddabc6588226211b8fc23e2799e37d628..50c6a1e7f675cea884a5a0bbb71fb061bfdfe932 100644 (file)
@@ -51,7 +51,7 @@ struct mtd_file_info {
        enum mtd_file_modes mode;
 };
 
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
@@ -77,7 +77,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
 
 
 
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
 {
        int minor = iminor(inode);
        int devnum = minor >> 1;
@@ -142,11 +142,11 @@ static int mtd_open(struct inode *inode, struct file *file)
 out:
        mutex_unlock(&mtd_mutex);
        return ret;
-} /* mtd_open */
+} /* mtdchar_open */
 
 /*====================================================================*/
 
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
@@ -154,8 +154,8 @@ static int mtd_close(struct inode *inode, struct file *file)
        pr_debug("MTD_close\n");
 
        /* Only sync if opened RW */
-       if ((file->f_mode & FMODE_WRITE) && mtd->sync)
-               mtd->sync(mtd);
+       if ((file->f_mode & FMODE_WRITE))
+               mtd_sync(mtd);
 
        iput(mfi->ino);
 
@@ -164,7 +164,7 @@ static int mtd_close(struct inode *inode, struct file *file)
        kfree(mfi);
 
        return 0;
-} /* mtd_close */
+} /* mtdchar_close */
 
 /* Back in June 2001, dwmw2 wrote:
  *
@@ -184,11 +184,12 @@ static int mtd_close(struct inode *inode, struct file *file)
  * alignment requirements are not met in the NAND subdriver.
  */
 
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+                       loff_t *ppos)
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
-       size_t retlen=0;
+       size_t retlen;
        size_t total_retlen=0;
        int ret=0;
        int len;
@@ -212,10 +213,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 
                switch (mfi->mode) {
                case MTD_FILE_MODE_OTP_FACTORY:
-                       ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+                       ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+                                                    &retlen, kbuf);
                        break;
                case MTD_FILE_MODE_OTP_USER:
-                       ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+                       ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+                                                    &retlen, kbuf);
                        break;
                case MTD_FILE_MODE_RAW:
                {
@@ -226,12 +229,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
                        ops.oobbuf = NULL;
                        ops.len = len;
 
-                       ret = mtd->read_oob(mtd, *ppos, &ops);
+                       ret = mtd_read_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }
                default:
-                       ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+                       ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
                }
                /* Nand returns -EBADMSG on ECC errors, but it returns
                 * the data. For our userspace tools it is important
@@ -265,9 +268,10 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 
        kfree(kbuf);
        return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
 
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+                       loff_t *ppos)
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
@@ -306,11 +310,8 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
                        ret = -EROFS;
                        break;
                case MTD_FILE_MODE_OTP_USER:
-                       if (!mtd->write_user_prot_reg) {
-                               ret = -EOPNOTSUPP;
-                               break;
-                       }
-                       ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+                       ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+                                                     &retlen, kbuf);
                        break;
 
                case MTD_FILE_MODE_RAW:
@@ -323,13 +324,13 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
                        ops.ooboffs = 0;
                        ops.len = len;
 
-                       ret = mtd->write_oob(mtd, *ppos, &ops);
+                       ret = mtd_write_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }
 
                default:
-                       ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+                       ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
                }
                if (!ret) {
                        *ppos += retlen;
@@ -345,7 +346,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
 
        kfree(kbuf);
        return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
 
 /*======================================================================
 
@@ -361,20 +362,22 @@ static void mtdchar_erase_callback (struct erase_info *instr)
 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 {
        struct mtd_info *mtd = mfi->mtd;
+       size_t retlen;
        int ret = 0;
 
+       /*
+        * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
+        * operations are supported.
+        */
+       if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+               return -EOPNOTSUPP;
+
        switch (mode) {
        case MTD_OTP_FACTORY:
-               if (!mtd->read_fact_prot_reg)
-                       ret = -EOPNOTSUPP;
-               else
-                       mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+               mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
                break;
        case MTD_OTP_USER:
-               if (!mtd->read_fact_prot_reg)
-                       ret = -EOPNOTSUPP;
-               else
-                       mfi->mode = MTD_FILE_MODE_OTP_USER;
+               mfi->mode = MTD_FILE_MODE_OTP_USER;
                break;
        default:
                ret = -EINVAL;
@@ -387,7 +390,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 # define otp_select_filemode(f,m)      -EOPNOTSUPP
 #endif
 
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
        uint64_t start, uint32_t length, void __user *ptr,
        uint32_t __user *retp)
 {
@@ -424,7 +427,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
                return PTR_ERR(ops.oobbuf);
 
        start &= ~((uint64_t)mtd->writesize - 1);
-       ret = mtd->write_oob(mtd, start, &ops);
+       ret = mtd_write_oob(mtd, start, &ops);
 
        if (ops.oobretlen > 0xFFFFFFFFU)
                ret = -EOVERFLOW;
@@ -436,7 +439,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
        return ret;
 }
 
-static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
        uint64_t start, uint32_t length, void __user *ptr,
        uint32_t __user *retp)
 {
@@ -447,13 +450,8 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
        if (length > 4096)
                return -EINVAL;
 
-       if (!mtd->read_oob)
-               ret = -EOPNOTSUPP;
-       else
-               ret = access_ok(VERIFY_WRITE, ptr,
-                               length) ? 0 : -EFAULT;
-       if (ret)
-               return ret;
+       if (!access_ok(VERIFY_WRITE, ptr, length))
+               return -EFAULT;
 
        ops.ooblen = length;
        ops.ooboffs = start & (mtd->writesize - 1);
@@ -469,7 +467,7 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
                return -ENOMEM;
 
        start &= ~((uint64_t)mtd->writesize - 1);
-       ret = mtd->read_oob(mtd, start, &ops);
+       ret = mtd_read_oob(mtd, start, &ops);
 
        if (put_user(ops.oobretlen, retp))
                ret = -EFAULT;
@@ -530,7 +528,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
        return 0;
 }
 
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
                           struct blkpg_ioctl_arg __user *arg)
 {
        struct blkpg_ioctl_arg a;
@@ -566,7 +564,7 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
        }
 }
 
-static int mtd_write_ioctl(struct mtd_info *mtd,
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
                struct mtd_write_req __user *argp)
 {
        struct mtd_write_req req;
@@ -607,7 +605,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
                ops.oobbuf = NULL;
        }
 
-       ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+       ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
 
        kfree(ops.datbuf);
        kfree(ops.oobbuf);
@@ -615,7 +613,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
        return ret;
 }
 
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
@@ -729,7 +727,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                          wq_head is no longer there when the
                          callback routine tries to wake us up.
                        */
-                       ret = mtd->erase(mtd, erase);
+                       ret = mtd_erase(mtd, erase);
                        if (!ret) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&waitq, &wait);
@@ -755,7 +753,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+                       ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
                                buf.ptr, &buf_user->length);
                break;
        }
@@ -769,7 +767,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+                       ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
                                buf.ptr, &buf_user->start);
                break;
        }
@@ -782,7 +780,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+                       ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
                                (void __user *)(uintptr_t)buf.usr_ptr,
                                &buf_user->length);
                break;
@@ -796,7 +794,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+                       ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
                                (void __user *)(uintptr_t)buf.usr_ptr,
                                &buf_user->length);
                break;
@@ -804,7 +802,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 
        case MEMWRITE:
        {
-               ret = mtd_write_ioctl(mtd,
+               ret = mtdchar_write_ioctl(mtd,
                      (struct mtd_write_req __user *)arg);
                break;
        }
@@ -816,10 +814,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&einfo, argp, sizeof(einfo)))
                        return -EFAULT;
 
-               if (!mtd->lock)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = mtd->lock(mtd, einfo.start, einfo.length);
+               ret = mtd_lock(mtd, einfo.start, einfo.length);
                break;
        }
 
@@ -830,10 +825,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&einfo, argp, sizeof(einfo)))
                        return -EFAULT;
 
-               if (!mtd->unlock)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = mtd->unlock(mtd, einfo.start, einfo.length);
+               ret = mtd_unlock(mtd, einfo.start, einfo.length);
                break;
        }
 
@@ -844,10 +836,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                if (copy_from_user(&einfo, argp, sizeof(einfo)))
                        return -EFAULT;
 
-               if (!mtd->is_locked)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+               ret = mtd_is_locked(mtd, einfo.start, einfo.length);
                break;
        }
 
@@ -878,10 +867,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 
                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
-               if (!mtd->block_isbad)
-                       ret = -EOPNOTSUPP;
-               else
-                       return mtd->block_isbad(mtd, offs);
+               return mtd_block_isbad(mtd, offs);
                break;
        }
 
@@ -891,10 +877,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 
                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
-               if (!mtd->block_markbad)
-                       ret = -EOPNOTSUPP;
-               else
-                       return mtd->block_markbad(mtd, offs);
+               return mtd_block_markbad(mtd, offs);
                break;
        }
 
@@ -919,17 +902,15 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
-               ret = -EOPNOTSUPP;
                switch (mfi->mode) {
                case MTD_FILE_MODE_OTP_FACTORY:
-                       if (mtd->get_fact_prot_info)
-                               ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+                       ret = mtd_get_fact_prot_info(mtd, buf, 4096);
                        break;
                case MTD_FILE_MODE_OTP_USER:
-                       if (mtd->get_user_prot_info)
-                               ret = mtd->get_user_prot_info(mtd, buf, 4096);
+                       ret = mtd_get_user_prot_info(mtd, buf, 4096);
                        break;
                default:
+                       ret = -EINVAL;
                        break;
                }
                if (ret >= 0) {
@@ -953,9 +934,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                        return -EINVAL;
                if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
                        return -EFAULT;
-               if (!mtd->lock_user_prot_reg)
-                       return -EOPNOTSUPP;
-               ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+               ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
                break;
        }
 #endif
@@ -999,7 +978,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
                        break;
 
                case MTD_FILE_MODE_RAW:
-                       if (!mtd->read_oob || !mtd->write_oob)
+                       if (!mtd_has_oob(mtd))
                                return -EOPNOTSUPP;
                        mfi->mode = arg;
 
@@ -1014,7 +993,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 
        case BLKPG:
        {
-               ret = mtd_blkpg_ioctl(mtd,
+               ret = mtdchar_blkpg_ioctl(mtd,
                      (struct blkpg_ioctl_arg __user *)arg);
                break;
        }
@@ -1033,12 +1012,12 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
        return ret;
 } /* memory_ioctl */
 
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
 {
        int ret;
 
        mutex_lock(&mtd_mutex);
-       ret = mtd_ioctl(file, cmd, arg);
+       ret = mtdchar_ioctl(file, cmd, arg);
        mutex_unlock(&mtd_mutex);
 
        return ret;
@@ -1055,7 +1034,7 @@ struct mtd_oob_buf32 {
 #define MEMWRITEOOB32          _IOWR('M', 3, struct mtd_oob_buf32)
 #define MEMREADOOB32           _IOWR('M', 4, struct mtd_oob_buf32)
 
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
        unsigned long arg)
 {
        struct mtd_file_info *mfi = file->private_data;
@@ -1074,7 +1053,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_writeoob(file, mtd, buf.start,
+                       ret = mtdchar_writeoob(file, mtd, buf.start,
                                buf.length, compat_ptr(buf.ptr),
                                &buf_user->length);
                break;
@@ -1089,13 +1068,13 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
                if (copy_from_user(&buf, argp, sizeof(buf)))
                        ret = -EFAULT;
                else
-                       ret = mtd_do_readoob(file, mtd, buf.start,
+                       ret = mtdchar_readoob(file, mtd, buf.start,
                                buf.length, compat_ptr(buf.ptr),
                                &buf_user->start);
                break;
        }
        default:
-               ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+               ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
        }
 
        mutex_unlock(&mtd_mutex);
@@ -1111,7 +1090,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
  *   mappings)
  */
 #ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
@@ -1119,32 +1098,28 @@ static unsigned long mtd_get_unmapped_area(struct file *file,
 {
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
+       unsigned long offset;
+       int ret;
 
-       if (mtd->get_unmapped_area) {
-               unsigned long offset;
-
-               if (addr != 0)
-                       return (unsigned long) -EINVAL;
-
-               if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
-                       return (unsigned long) -EINVAL;
+       if (addr != 0)
+               return (unsigned long) -EINVAL;
 
-               offset = pgoff << PAGE_SHIFT;
-               if (offset > mtd->size - len)
-                       return (unsigned long) -EINVAL;
+       if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+               return (unsigned long) -EINVAL;
 
-               return mtd->get_unmapped_area(mtd, len, offset, flags);
-       }
+       offset = pgoff << PAGE_SHIFT;
+       if (offset > mtd->size - len)
+               return (unsigned long) -EINVAL;
 
-       /* can't map directly */
-       return (unsigned long) -ENOSYS;
+       ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+       return ret == -EOPNOTSUPP ? -ENOSYS : ret;
 }
 #endif
 
 /*
  * set up a mapping for shared memory segments
  */
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
 {
 #ifdef CONFIG_MMU
        struct mtd_file_info *mfi = file->private_data;
@@ -1185,18 +1160,18 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations mtd_fops = {
        .owner          = THIS_MODULE,
-       .llseek         = mtd_lseek,
-       .read           = mtd_read,
-       .write          = mtd_write,
-       .unlocked_ioctl = mtd_unlocked_ioctl,
+       .llseek         = mtdchar_lseek,
+       .read           = mtdchar_read,
+       .write          = mtdchar_write,
+       .unlocked_ioctl = mtdchar_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
-       .compat_ioctl   = mtd_compat_ioctl,
+       .compat_ioctl   = mtdchar_compat_ioctl,
 #endif
-       .open           = mtd_open,
-       .release        = mtd_close,
-       .mmap           = mtd_mmap,
+       .open           = mtdchar_open,
+       .release        = mtdchar_close,
+       .mmap           = mtdchar_mmap,
 #ifndef CONFIG_MMU
-       .get_unmapped_area = mtd_get_unmapped_area,
+       .get_unmapped_area = mtdchar_get_unmapped_area,
 #endif
 };
 
index 6df4d4d4eb92bb585c9f0bc08815be208cda2d53..1ed5103b219ba3d51919b3ae285eb64f335e1af9 100644 (file)
@@ -91,7 +91,7 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
                        /* Entire transaction goes into this subdev */
                        size = len;
 
-               err = subdev->read(subdev, from, size, &retsize, buf);
+               err = mtd_read(subdev, from, size, &retsize, buf);
 
                /* Save information about bitflips! */
                if (unlikely(err)) {
@@ -148,7 +148,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
-                       err = subdev->write(subdev, to, size, &retsize, buf);
+                       err = mtd_write(subdev, to, size, &retsize, buf);
 
                if (err)
                        break;
@@ -227,8 +227,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
-                       err = subdev->writev(subdev, &vecs_copy[entry_low],
-                               entry_high - entry_low + 1, to, &retsize);
+                       err = mtd_writev(subdev, &vecs_copy[entry_low],
+                                        entry_high - entry_low + 1, to,
+                                        &retsize);
 
                vecs_copy[entry_high].iov_len = old_iov_len - size;
                vecs_copy[entry_high].iov_base += size;
@@ -273,7 +274,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
                if (from + devops.len > subdev->size)
                        devops.len = subdev->size - from;
 
-               err = subdev->read_oob(subdev, from, &devops);
+               err = mtd_read_oob(subdev, from, &devops);
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;
 
@@ -333,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
                if (to + devops.len > subdev->size)
                        devops.len = subdev->size - to;
 
-               err = subdev->write_oob(subdev, to, &devops);
+               err = mtd_write_oob(subdev, to, &devops);
                ops->retlen += devops.oobretlen;
                if (err)
                        return err;
@@ -379,7 +380,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
         * FIXME: Allow INTERRUPTIBLE. Which means
         * not having the wait_queue head on the stack.
         */
-       err = mtd->erase(mtd, erase);
+       err = mtd_erase(mtd, erase);
        if (!err) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
@@ -554,12 +555,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                else
                        size = len;
 
-               if (subdev->lock) {
-                       err = subdev->lock(subdev, ofs, size);
-                       if (err)
-                               break;
-               } else
-                       err = -EOPNOTSUPP;
+               err = mtd_lock(subdev, ofs, size);
+               if (err)
+                       break;
 
                len -= size;
                if (len == 0)
@@ -594,12 +592,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                else
                        size = len;
 
-               if (subdev->unlock) {
-                       err = subdev->unlock(subdev, ofs, size);
-                       if (err)
-                               break;
-               } else
-                       err = -EOPNOTSUPP;
+               err = mtd_unlock(subdev, ofs, size);
+               if (err)
+                       break;
 
                len -= size;
                if (len == 0)
@@ -619,7 +614,7 @@ static void concat_sync(struct mtd_info *mtd)
 
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
-               subdev->sync(subdev);
+               mtd_sync(subdev);
        }
 }
 
@@ -630,7 +625,7 @@ static int concat_suspend(struct mtd_info *mtd)
 
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
-               if ((rc = subdev->suspend(subdev)) < 0)
+               if ((rc = mtd_suspend(subdev)) < 0)
                        return rc;
        }
        return rc;
@@ -643,7 +638,7 @@ static void concat_resume(struct mtd_info *mtd)
 
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
-               subdev->resume(subdev);
+               mtd_resume(subdev);
        }
 }
 
@@ -652,7 +647,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
        struct mtd_concat *concat = CONCAT(mtd);
        int i, res = 0;
 
-       if (!concat->subdev[0]->block_isbad)
+       if (!mtd_can_have_bb(concat->subdev[0]))
                return res;
 
        if (ofs > mtd->size)
@@ -666,7 +661,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
                        continue;
                }
 
-               res = subdev->block_isbad(subdev, ofs);
+               res = mtd_block_isbad(subdev, ofs);
                break;
        }
 
@@ -678,7 +673,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;
 
-       if (!concat->subdev[0]->block_markbad)
+       if (!mtd_can_have_bb(concat->subdev[0]))
                return 0;
 
        if (ofs > mtd->size)
@@ -692,7 +687,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
                        continue;
                }
 
-               err = subdev->block_markbad(subdev, ofs);
+               err = mtd_block_markbad(subdev, ofs);
                if (!err)
                        mtd->ecc_stats.badblocks++;
                break;
@@ -725,11 +720,7 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
                if (offset + len > subdev->size)
                        return (unsigned long) -EINVAL;
 
-               if (subdev->get_unmapped_area)
-                       return subdev->get_unmapped_area(subdev, len, offset,
-                                                        flags);
-
-               break;
+               return mtd_get_unmapped_area(subdev, len, offset, flags);
        }
 
        return (unsigned long) -ENOSYS;
index b01993ea260ef95b064a04763989bc324f61a342..6ae9ca01388b76b06798749aba0a35cae34f8b9c 100644 (file)
@@ -107,7 +107,8 @@ static LIST_HEAD(mtd_notifiers);
  */
 static void mtd_release(struct device *dev)
 {
-       dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+       dev_t index = MTD_DEVT(mtd->index);
 
        /* remove /dev/mtdXro node if needed */
        if (index)
@@ -116,27 +117,24 @@ static void mtd_release(struct device *dev)
 
 static int mtd_cls_suspend(struct device *dev, pm_message_t state)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
-       if (mtd && mtd->suspend)
-               return mtd->suspend(mtd);
-       else
-               return 0;
+       return mtd_suspend(mtd);
 }
 
 static int mtd_cls_resume(struct device *dev)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
-       
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
        if (mtd && mtd->resume)
-               mtd->resume(mtd);
+               mtd_resume(mtd);
        return 0;
 }
 
 static ssize_t mtd_type_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
        char *type;
 
        switch (mtd->type) {
@@ -172,7 +170,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
 static ssize_t mtd_flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
 
@@ -182,7 +180,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
 static ssize_t mtd_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%llu\n",
                (unsigned long long)mtd->size);
@@ -193,7 +191,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
 static ssize_t mtd_erasesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
 
@@ -203,7 +201,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
 static ssize_t mtd_writesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
 
@@ -213,7 +211,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
 static ssize_t mtd_subpagesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
        unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
 
        return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -224,7 +222,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
 static ssize_t mtd_oobsize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
 
@@ -234,7 +232,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
 static ssize_t mtd_numeraseregions_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
 
@@ -245,7 +243,7 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
 static ssize_t mtd_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct mtd_info *mtd = dev_to_mtd(dev);
+       struct mtd_info *mtd = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
 
@@ -338,9 +336,9 @@ int add_mtd_device(struct mtd_info *mtd)
        mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
 
        /* Some chips always power up locked. Unlock them now */
-       if ((mtd->flags & MTD_WRITEABLE)
-           && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
-               if (mtd->unlock(mtd, 0, mtd->size))
+       if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+               error = mtd_unlock(mtd, 0, mtd->size);
+               if (error && error != -EOPNOTSUPP)
                        printk(KERN_WARNING
                               "%s: unlock failed, writes may not work\n",
                               mtd->name);
@@ -516,7 +514,6 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
  *     or removal of MTD devices. Causes the 'add' callback to be immediately
  *     invoked for each MTD device currently present in the system.
  */
-
 void register_mtd_user (struct mtd_notifier *new)
 {
        struct mtd_info *mtd;
@@ -532,6 +529,7 @@ void register_mtd_user (struct mtd_notifier *new)
 
        mutex_unlock(&mtd_table_mutex);
 }
+EXPORT_SYMBOL_GPL(register_mtd_user);
 
 /**
  *     unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -542,7 +540,6 @@ void register_mtd_user (struct mtd_notifier *new)
  *     'remove' callback to be immediately invoked for each MTD device
  *     currently present in the system.
  */
-
 int unregister_mtd_user (struct mtd_notifier *old)
 {
        struct mtd_info *mtd;
@@ -558,7 +555,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
        mutex_unlock(&mtd_table_mutex);
        return 0;
 }
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
 
 /**
  *     get_mtd_device - obtain a validated handle for an MTD device
@@ -571,7 +568,6 @@ int unregister_mtd_user (struct mtd_notifier *old)
  *     both, return the num'th driver only if its address matches. Return
  *     error code if not.
  */
-
 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
 {
        struct mtd_info *ret = NULL, *other;
@@ -604,6 +600,7 @@ out:
        mutex_unlock(&mtd_table_mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(get_mtd_device);
 
 
 int __get_mtd_device(struct mtd_info *mtd)
@@ -624,6 +621,7 @@ int __get_mtd_device(struct mtd_info *mtd)
        mtd->usecount++;
        return 0;
 }
+EXPORT_SYMBOL_GPL(__get_mtd_device);
 
 /**
  *     get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -633,7 +631,6 @@ int __get_mtd_device(struct mtd_info *mtd)
  *     This function returns MTD device description structure in case of
  *     success and an error code in case of failure.
  */
-
 struct mtd_info *get_mtd_device_nm(const char *name)
 {
        int err = -ENODEV;
@@ -662,6 +659,7 @@ out_unlock:
        mutex_unlock(&mtd_table_mutex);
        return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
 
 void put_mtd_device(struct mtd_info *mtd)
 {
@@ -670,6 +668,7 @@ void put_mtd_device(struct mtd_info *mtd)
        mutex_unlock(&mtd_table_mutex);
 
 }
+EXPORT_SYMBOL_GPL(put_mtd_device);
 
 void __put_mtd_device(struct mtd_info *mtd)
 {
@@ -681,39 +680,65 @@ void __put_mtd_device(struct mtd_info *mtd)
 
        module_put(mtd->owner);
 }
+EXPORT_SYMBOL_GPL(__put_mtd_device);
 
-/* default_mtd_writev - default mtd writev method for MTD devices that
- *                     don't implement their own
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
  */
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
-                      unsigned long count, loff_t to, size_t *retlen)
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+                             unsigned long count, loff_t to, size_t *retlen)
 {
        unsigned long i;
        size_t totlen = 0, thislen;
        int ret = 0;
 
-       if(!mtd->write) {
-               ret = -EROFS;
-       } else {
-               for (i=0; i<count; i++) {
-                       if (!vecs[i].iov_len)
-                               continue;
-                       ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
-                       totlen += thislen;
-                       if (ret || thislen != vecs[i].iov_len)
-                               break;
-                       to += vecs[i].iov_len;
-               }
+       for (i = 0; i < count; i++) {
+               if (!vecs[i].iov_len)
+                       continue;
+               ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+                               vecs[i].iov_base);
+               totlen += thislen;
+               if (ret || thislen != vecs[i].iov_len)
+                       break;
+               to += vecs[i].iov_len;
        }
-       if (retlen)
-               *retlen = totlen;
+       *retlen = totlen;
        return ret;
 }
 
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+              unsigned long count, loff_t to, size_t *retlen)
+{
+       *retlen = 0;
+       if (!mtd->writev)
+               return default_mtd_writev(mtd, vecs, count, to, retlen);
+       return mtd->writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
 /**
  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
  *        to the actual allocation size on success.
  *
  * This routine attempts to allocate a contiguous kernel buffer up to
@@ -758,15 +783,6 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
         */
        return kmalloc(*size, GFP_KERNEL);
 }
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
 
 #ifdef CONFIG_PROC_FS
index 1e2fa6236705941b9fea37b5f0d5bf4a713e56c7..db8e8272d69b04d08aebbf99f6f668220e2b994a 100644 (file)
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);
 
-       ret = mtd->erase(mtd, &erase);
+       ret = mtd_erase(mtd, &erase);
        if (ret) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&wait_q, &wait);
@@ -169,8 +169,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
                        cxt->nextpage = 0;
        }
 
-       while (mtd->block_isbad) {
-               ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
+       while (mtd_can_have_bb(mtd)) {
+               ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
                if (!ret)
                        break;
                if (ret < 0) {
@@ -199,8 +199,8 @@ badblock:
                return;
        }
 
-       if (mtd->block_markbad && ret == -EIO) {
-               ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
+       if (mtd_can_have_bb(mtd) && ret == -EIO) {
+               ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
                if (ret < 0) {
                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
                        return;
@@ -221,12 +221,16 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
        hdr[0] = cxt->nextcount;
        hdr[1] = MTDOOPS_KERNMSG_MAGIC;
 
-       if (panic)
-               ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
-                                       record_size, &retlen, cxt->oops_buf);
-       else
-               ret = mtd->write(mtd, cxt->nextpage * record_size,
-                                       record_size, &retlen, cxt->oops_buf);
+       if (panic) {
+               ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+                                     record_size, &retlen, cxt->oops_buf);
+               if (ret == -EOPNOTSUPP) {
+                       printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+                       return;
+               }
+       } else
+               ret = mtd_write(mtd, cxt->nextpage * record_size,
+                               record_size, &retlen, cxt->oops_buf);
 
        if (retlen != record_size || ret < 0)
                printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -253,10 +257,13 @@ static void find_next_position(struct mtdoops_context *cxt)
        size_t retlen;
 
        for (page = 0; page < cxt->oops_pages; page++) {
+               if (mtd_can_have_bb(mtd) &&
+                   mtd_block_isbad(mtd, page * record_size))
+                       continue;
                /* Assume the page is used */
                mark_page_used(cxt, page);
-               ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
-                               &retlen, (u_char *) &count[0]);
+               ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+                              &retlen, (u_char *)&count[0]);
                if (retlen != MTDOOPS_HEADER_SIZE ||
                                (ret < 0 && !mtd_is_bitflip(ret))) {
                        printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
@@ -327,13 +334,8 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
        memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
 
        /* Panics must be written immediately */
-       if (reason != KMSG_DUMP_OOPS) {
-               if (!cxt->mtd->panic_write)
-                       printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
-               else
-                       mtdoops_write(cxt, 1);
-               return;
-       }
+       if (reason != KMSG_DUMP_OOPS)
+               mtdoops_write(cxt, 1);
 
        /* For other cases, schedule work to write it "nicely" */
        schedule_work(&cxt->work_write);
@@ -369,7 +371,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 
        /* oops_page_used is a bit field */
        cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
-                       BITS_PER_LONG));
+                       BITS_PER_LONG) * sizeof(unsigned long));
        if (!cxt->oops_page_used) {
                printk(KERN_ERR "mtdoops: could not allocate page array\n");
                return;
index a0bd2de4752baa3981b09a31b1efb0cc5f7b4a8b..a3d44c3416b4f2bef8a1932976b2825a1e230609 100644 (file)
@@ -70,8 +70,7 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
-       res = part->master->read(part->master, from + part->offset,
-                                  len, retlen, buf);
+       res = mtd_read(part->master, from + part->offset, len, retlen, buf);
        if (unlikely(res)) {
                if (mtd_is_bitflip(res))
                        mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -89,15 +88,15 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
-       return part->master->point (part->master, from + part->offset,
-                                   len, retlen, virt, phys);
+       return mtd_point(part->master, from + part->offset, len, retlen,
+                        virt, phys);
 }
 
 static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
        struct mtd_part *part = PART(mtd);
 
-       part->master->unpoint(part->master, from + part->offset, len);
+       mtd_unpoint(part->master, from + part->offset, len);
 }
 
 static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -108,8 +107,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
        struct mtd_part *part = PART(mtd);
 
        offset += part->offset;
-       return part->master->get_unmapped_area(part->master, len, offset,
-                                              flags);
+       return mtd_get_unmapped_area(part->master, len, offset, flags);
 }
 
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -140,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
                        return -EINVAL;
        }
 
-       res = part->master->read_oob(part->master, from + part->offset, ops);
+       res = mtd_read_oob(part->master, from + part->offset, ops);
        if (unlikely(res)) {
                if (mtd_is_bitflip(res))
                        mtd->ecc_stats.corrected++;
@@ -154,30 +152,28 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->read_user_prot_reg(part->master, from,
-                                       len, retlen, buf);
+       return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
 }
 
 static int part_get_user_prot_info(struct mtd_info *mtd,
                struct otp_info *buf, size_t len)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->get_user_prot_info(part->master, buf, len);
+       return mtd_get_user_prot_info(part->master, buf, len);
 }
 
 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->read_fact_prot_reg(part->master, from,
-                                       len, retlen, buf);
+       return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
 }
 
 static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                size_t len)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->get_fact_prot_info(part->master, buf, len);
+       return mtd_get_fact_prot_info(part->master, buf, len);
 }
 
 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -190,8 +186,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
-       return part->master->write(part->master, to + part->offset,
-                                   len, retlen, buf);
+       return mtd_write(part->master, to + part->offset, len, retlen, buf);
 }
 
 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -204,8 +199,8 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
-       return part->master->panic_write(part->master, to + part->offset,
-                                   len, retlen, buf);
+       return mtd_panic_write(part->master, to + part->offset, len, retlen,
+                              buf);
 }
 
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -220,22 +215,21 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
                return -EINVAL;
        if (ops->datbuf && to + ops->len > mtd->size)
                return -EINVAL;
-       return part->master->write_oob(part->master, to + part->offset, ops);
+       return mtd_write_oob(part->master, to + part->offset, ops);
 }
 
 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->write_user_prot_reg(part->master, from,
-                                       len, retlen, buf);
+       return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
 }
 
 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->lock_user_prot_reg(part->master, from, len);
+       return mtd_lock_user_prot_reg(part->master, from, len);
 }
 
 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
@@ -244,8 +238,8 @@ static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
-       return part->master->writev(part->master, vecs, count,
-                                       to + part->offset, retlen);
+       return mtd_writev(part->master, vecs, count, to + part->offset,
+                         retlen);
 }
 
 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -257,7 +251,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
        if (instr->addr >= mtd->size)
                return -EINVAL;
        instr->addr += part->offset;
-       ret = part->master->erase(part->master, instr);
+       ret = mtd_erase(part->master, instr);
        if (ret) {
                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
@@ -285,7 +279,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
-       return part->master->lock(part->master, ofs + part->offset, len);
+       return mtd_lock(part->master, ofs + part->offset, len);
 }
 
 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -293,7 +287,7 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
-       return part->master->unlock(part->master, ofs + part->offset, len);
+       return mtd_unlock(part->master, ofs + part->offset, len);
 }
 
 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -301,25 +295,25 @@ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
-       return part->master->is_locked(part->master, ofs + part->offset, len);
+       return mtd_is_locked(part->master, ofs + part->offset, len);
 }
 
 static void part_sync(struct mtd_info *mtd)
 {
        struct mtd_part *part = PART(mtd);
-       part->master->sync(part->master);
+       mtd_sync(part->master);
 }
 
 static int part_suspend(struct mtd_info *mtd)
 {
        struct mtd_part *part = PART(mtd);
-       return part->master->suspend(part->master);
+       return mtd_suspend(part->master);
 }
 
 static void part_resume(struct mtd_info *mtd)
 {
        struct mtd_part *part = PART(mtd);
-       part->master->resume(part->master);
+       mtd_resume(part->master);
 }
 
 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
@@ -328,7 +322,7 @@ static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
-       return part->master->block_isbad(part->master, ofs);
+       return mtd_block_isbad(part->master, ofs);
 }
 
 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -341,7 +335,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
-       res = part->master->block_markbad(part->master, ofs);
+       res = mtd_block_markbad(part->master, ofs);
        if (!res)
                mtd->ecc_stats.badblocks++;
        return res;
@@ -559,8 +553,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
                uint64_t offs = 0;
 
                while (offs < slave->mtd.size) {
-                       if (master->block_isbad(master,
-                                               offs + slave->offset))
+                       if (mtd_block_isbad(master, offs + slave->offset))
                                slave->mtd.ecc_stats.badblocks++;
                        offs += slave->mtd.erasesize;
                }
index bd9590c723e41fcbb7055cc4dc61f49f402e340d..c92f0f6bc130e12380a53eab2a53239be33916f1 100644 (file)
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
        eb->root = NULL;
 
        /* badblocks not supported */
-       if (!d->mtd->block_markbad)
+       if (!mtd_can_have_bb(d->mtd))
                return 1;
 
        offset = mtdswap_eb_offset(d, eb);
        dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
-       ret = d->mtd->block_markbad(d->mtd, offset);
+       ret = mtd_block_markbad(d->mtd, offset);
 
        if (ret) {
                dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,7 +312,7 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
 static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
                        struct mtd_oob_ops *ops)
 {
-       int ret = d->mtd->read_oob(d->mtd, from, ops);
+       int ret = mtd_read_oob(d->mtd, from, ops);
 
        if (mtd_is_bitflip(ret))
                return ret;
@@ -343,7 +343,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
        offset = mtdswap_eb_offset(d, eb);
 
        /* Check first if the block is bad. */
-       if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+       if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
                return MTDSWAP_SCANNED_BAD;
 
        ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
@@ -403,7 +403,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
                offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
        }
 
-       ret = d->mtd->write_oob(d->mtd, offset , &ops);
+       ret = mtd_write_oob(d->mtd, offset, &ops);
 
        if (ret) {
                dev_warn(d->dev, "Write OOB failed for block at %08llx "
@@ -567,7 +567,7 @@ retry:
        erase.len       = mtd->erasesize;
        erase.priv      = (u_long)&wq;
 
-       ret = mtd->erase(mtd, &erase);
+       ret = mtd_erase(mtd, &erase);
        if (ret) {
                if (retries++ < MTDSWAP_ERASE_RETRIES) {
                        dev_warn(d->dev,
@@ -689,7 +689,7 @@ retry:
                return ret;
 
        writepos = (loff_t)*bp << PAGE_SHIFT;
-       ret =  mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+       ret =  mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
        if (ret == -EIO || mtd_is_eccerr(ret)) {
                d->curr_write_pos--;
                eb->active_count--;
@@ -736,7 +736,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
        retries = 0;
 
 retry:
-       ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+       ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
 
        if (ret < 0 && !mtd_is_bitflip(ret)) {
                oldeb = d->eb_data + oldblock / d->pages_per_eblk;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
                        patt = mtdswap_test_patt(test + i);
                        memset(d->page_buf, patt, mtd->writesize);
                        memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
-                       ret = mtd->write_oob(mtd, pos, &ops);
+                       ret = mtd_write_oob(mtd, pos, &ops);
                        if (ret)
                                goto error;
 
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
 
                pos = base;
                for (i = 0; i < mtd_pages; i++) {
-                       ret = mtd->read_oob(mtd, pos, &ops);
+                       ret = mtd_read_oob(mtd, pos, &ops);
                        if (ret)
                                goto error;
 
@@ -1047,8 +1047,7 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
 {
        struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
 
-       if (d->mtd->sync)
-               d->mtd->sync(d->mtd);
+       mtd_sync(d->mtd);
        return 0;
 }
 
@@ -1059,9 +1058,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
 
        badcnt = 0;
 
-       if (mtd->block_isbad)
+       if (mtd_can_have_bb(mtd))
                for (offset = 0; offset < size; offset += mtd->erasesize)
-                       if (mtd->block_isbad(mtd, offset))
+                       if (mtd_block_isbad(mtd, offset))
                                badcnt++;
 
        return badcnt;
@@ -1161,7 +1160,7 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
        retries = 0;
 
 retry:
-       ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+       ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
 
        d->mtd_read_count++;
        if (mtd_is_bitflip(ret)) {
index dd0279249bf42c284d3400de320ff2fd72a2df58..31b034b7eba3bd5e464c1ba3042a7da9625c2cad 100644 (file)
@@ -110,7 +110,7 @@ config MTD_NAND_AMS_DELTA
 
 config MTD_NAND_OMAP2
        tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
-       depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
+       depends on ARCH_OMAP2PLUS
        help
           Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
          platforms.
index 9e6b498c9bebb07191d8146f447a89c56d2ca11b..3197e9764fcd32025c6ea715a05765ca4e45c30c 100644 (file)
@@ -280,17 +280,7 @@ static struct platform_driver ams_delta_nand_driver = {
        },
 };
 
-static int __init ams_delta_nand_init(void)
-{
-       return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
-       platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
index 46b58d67284726fe2cee86852f9fe4640679a8a6..50387fd4009bdbe464af98703e0653debf4baaf6 100644 (file)
@@ -546,18 +546,7 @@ static struct platform_driver nand_driver = {
        .resume = bcm_umi_nand_resume,
 };
 
-static int __init nand_init(void)
-{
-       return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
-       platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
+module_platform_driver(nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Broadcom");
index c153e1f77f90d0e58dc6d892f2395a71e500b7ef..6e566156956f2465e32b122d7728d9890ed98f08 100644 (file)
@@ -675,7 +675,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 
        davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
 
-       ret = davinci_aemif_setup_timing(info->timing, info->base,
+       ret = 0;
+       if (info->timing)
+               ret = davinci_aemif_setup_timing(info->timing, info->base,
                                                        info->core_chipsel);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
index 5780dbab611360d546c0770fb378060a223edda8..df921e7a496cb57dd2f27be1225fdd62fe52dfe6 100644 (file)
@@ -1072,7 +1072,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
        size_t retlen;
 
        for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
-               ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+               ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
                if (retlen != mtd->writesize)
                        continue;
                if (ret) {
@@ -1097,7 +1097,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
        /* Only one mediaheader was found.  We want buf to contain a
           mediaheader on return, so we'll have to re-read the one we found. */
        offs = doc->mh0_page << this->page_shift;
-       ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+       ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
        if (retlen != mtd->writesize) {
                /* Insanity.  Give up. */
                printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
index eedd8ee2c9ac8b83059edf9cf5856a12e4a53e48..7195ee6efe12293df9e92302959dbe786b3f80bd 100644 (file)
@@ -166,15 +166,22 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
 
        elbc_fcm_ctrl->page = page_addr;
 
-       out_be32(&lbc->fbar,
-                page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
        if (priv->page_size) {
+               /*
+                * large page size chip : FPAR[PI] save the lowest 6 bits,
+                *                        FBAR[BLK] save the other bits.
+                */
+               out_be32(&lbc->fbar, page_addr >> 6);
                out_be32(&lbc->fpar,
                         ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
                         (oob ? FPAR_LP_MS : 0) | column);
                buf_num = (page_addr & 1) << 2;
        } else {
+               /*
+                * small page size chip : FPAR[PI] save the lowest 5 bits,
+                *                        FBAR[BLK] save the other bits.
+                */
+               out_be32(&lbc->fbar, page_addr >> 5);
                out_be32(&lbc->fpar,
                         ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
                         (oob ? FPAR_SP_MS : 0) | column);
@@ -349,20 +356,22 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                fsl_elbc_run_command(mtd);
                return;
 
-       /* READID must read all 5 possible bytes while CEB is active */
        case NAND_CMD_READID:
-               dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+       case NAND_CMD_PARAM:
+               dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
 
                out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
                                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
                                    (FIR_OP_RBW << FIR_OP2_SHIFT));
-               out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
-               /* nand_get_flash_type() reads 8 bytes of entire ID string */
-               out_be32(&lbc->fbcr, 8);
-               elbc_fcm_ctrl->read_bytes = 8;
+               out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+               /*
+                * although currently it's 8 bytes for READID, we always read
+                * the maximum 256 bytes(for PARAM)
+                */
+               out_be32(&lbc->fbcr, 256);
+               elbc_fcm_ctrl->read_bytes = 256;
                elbc_fcm_ctrl->use_mdr = 1;
-               elbc_fcm_ctrl->mdr = 0;
-
+               elbc_fcm_ctrl->mdr = column;
                set_addr(mtd, 0, 0, 0);
                fsl_elbc_run_command(mtd);
                return;
@@ -407,9 +416,17 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                         page_addr, column);
 
                elbc_fcm_ctrl->column = column;
-               elbc_fcm_ctrl->oob = 0;
                elbc_fcm_ctrl->use_mdr = 1;
 
+               if (column >= mtd->writesize) {
+                       /* OOB area */
+                       column -= mtd->writesize;
+                       elbc_fcm_ctrl->oob = 1;
+               } else {
+                       WARN_ON(column != 0);
+                       elbc_fcm_ctrl->oob = 0;
+               }
+
                fcr = (NAND_CMD_STATUS   << FCR_CMD1_SHIFT) |
                      (NAND_CMD_SEQIN    << FCR_CMD2_SHIFT) |
                      (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -434,16 +451,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                 (FIR_OP_CW1 << FIR_OP6_SHIFT) |
                                 (FIR_OP_RS  << FIR_OP7_SHIFT));
 
-                       if (column >= mtd->writesize) {
+                       if (elbc_fcm_ctrl->oob)
                                /* OOB area --> READOOB */
-                               column -= mtd->writesize;
                                fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
-                               elbc_fcm_ctrl->oob = 1;
-                       } else {
-                               WARN_ON(column != 0);
+                       else
                                /* First 256 bytes --> READ0 */
                                fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
-                       }
                }
 
                out_be32(&lbc->fcr, fcr);
@@ -463,7 +476,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                 */
                if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
                    elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
-                       out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
+                       out_be32(&lbc->fbcr,
+                               elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
                else
                        out_be32(&lbc->fbcr, 0);
 
@@ -659,9 +673,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
        if (chip->pagemask & 0xff000000)
                al++;
 
-       /* add to ECCM mode set in fsl_elbc_init */
-       priv->fmr |= (12 << FMR_CWTO_SHIFT) |  /* Timeout > 12 ms */
-                    (al << FMR_AL_SHIFT);
+       priv->fmr |= al << FMR_AL_SHIFT;
 
        dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
                chip->numchips);
@@ -764,8 +776,10 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
        priv->mtd.priv = chip;
        priv->mtd.owner = THIS_MODULE;
 
-       /* Set the ECCM according to the settings in bootloader.*/
-       priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+       /* set timeout to maximum */
+       priv->fmr = 15 << FMR_CWTO_SHIFT;
+       if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+               priv->fmr |= FMR_ECCM;
 
        /* fill in nand_chip structure */
        /* set up function call table */
@@ -971,18 +985,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
        .remove = fsl_elbc_nand_remove,
 };
 
-static int __init fsl_elbc_nand_init(void)
-{
-       return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
-       platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Freescale");
index b4f3cc9f32fbe5614ad8d1a0a1cbe905ee01d3f5..45df542b9c61383b3b183fdbcd318bf4eeffa92a 100644 (file)
@@ -353,17 +353,7 @@ static struct platform_driver of_fun_driver = {
        .remove         = __devexit_p(fun_remove),
 };
 
-static int __init fun_module_init(void)
-{
-       return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
-       platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
index 2c2060b2800e38b61252a07c2c417b5069302868..27000a5f5f47722a1b30707a7b7d56f83b0e0559 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
 
 struct gpiomtd {
        void __iomem            *io_sync;
@@ -171,6 +174,96 @@ static int gpio_nand_devready(struct mtd_info *mtd)
        return gpio_get_value(gpiomtd->plat.gpio_rdy);
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+       { .compatible = "gpio-control-nand" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+                                  struct gpio_nand_platdata *plat)
+{
+       u32 val;
+
+       if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+               if (val == 2) {
+                       plat->options |= NAND_BUSWIDTH_16;
+               } else if (val != 1) {
+                       dev_err(dev, "invalid bank-width %u\n", val);
+                       return -EINVAL;
+               }
+       }
+
+       plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+       plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+       plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+       plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+       plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+       if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+               plat->chip_delay = val;
+
+       return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+       struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+       u64 addr;
+
+       if (!r || of_property_read_u64(pdev->dev.of_node,
+                                      "gpio-control-nand,io-sync-reg", &addr))
+               return NULL;
+
+       r->start = addr;
+       r->end = r->start + 0x3;
+       r->flags = IORESOURCE_MEM;
+
+       return r;
+}
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+                                         struct gpio_nand_platdata *plat)
+{
+       return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+                                      struct gpio_nand_platdata *plat)
+{
+       int ret = gpio_nand_get_config_of(dev, plat);
+
+       if (!ret)
+               return ret;
+
+       if (dev->platform_data) {
+               memcpy(plat, dev->platform_data, sizeof(*plat));
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+       struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+       if (r)
+               return r;
+
+       return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
 static int __devexit gpio_nand_remove(struct platform_device *dev)
 {
        struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
@@ -178,7 +271,7 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
 
        nand_release(&gpiomtd->mtd_info);
 
-       res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+       res = gpio_nand_get_io_sync(dev);
        iounmap(gpiomtd->io_sync);
        if (res)
                release_mem_region(res->start, resource_size(res));
@@ -226,9 +319,10 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
        struct gpiomtd *gpiomtd;
        struct nand_chip *this;
        struct resource *res0, *res1;
-       int ret;
+       struct mtd_part_parser_data ppdata = {};
+       int ret = 0;
 
-       if (!dev->dev.platform_data)
+       if (!dev->dev.of_node && !dev->dev.platform_data)
                return -EINVAL;
 
        res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -248,7 +342,7 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
                goto err_map;
        }
 
-       res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+       res1 = gpio_nand_get_io_sync(dev);
        if (res1) {
                gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
                if (!gpiomtd->io_sync) {
@@ -257,7 +351,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
                }
        }
 
-       memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+       ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+       if (ret)
+               goto err_nce;
 
        ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
        if (ret)
@@ -316,8 +412,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
                gpiomtd->plat.adjust_parts(&gpiomtd->plat,
                                           gpiomtd->mtd_info.size);
 
-       mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
-                           gpiomtd->plat.num_parts);
+       ppdata.of_node = dev->dev.of_node;
+       ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+                                       gpiomtd->plat.parts,
+                                       gpiomtd->plat.num_parts);
+       if (ret)
+               goto err_wp;
        platform_set_drvdata(dev, gpiomtd);
 
        return 0;
@@ -352,6 +452,7 @@ static struct platform_driver gpio_nand_driver = {
        .remove         = gpio_nand_remove,
        .driver         = {
                .name   = "gpio-nand",
+               .of_match_table = gpio_nand_id_table,
        },
 };
 
index e2664073a89b8eae1afa5c2ef5fe3d81e0348722..ac3b9f255e009bd2696688c5d3673e60395be5d7 100644 (file)
@@ -423,17 +423,7 @@ static struct platform_driver jz_nand_driver = {
        },
 };
 
-static int __init jz_nand_init(void)
-{
-       return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
-       platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
index 5ede64706346084dd5cdd6d38612d532ec493992..c240cf1af96166f8c2431e029dc90064a443ecf4 100644 (file)
@@ -879,19 +879,7 @@ static struct platform_driver mpc5121_nfc_driver = {
        },
 };
 
-static int __init mpc5121_nfc_init(void)
-{
-       return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
-       platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
index 3ed9c5e4d34e7bfbc51351a8475878424c325ab6..35b4565050f1430d425ed3615d3c4a806cda4b7a 100644 (file)
@@ -3132,8 +3132,8 @@ ident_done:
         * Bad block marker is stored in the last page of each block
         * on Samsung and Hynix MLC devices; stored in first two pages
         * of each block on Micron devices with 2KiB pages and on
-        * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
-        * only the first page.
+        * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+        * All others scan only the first page.
         */
        if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
                        (*maf_id == NAND_MFR_SAMSUNG ||
@@ -3143,7 +3143,8 @@ ident_done:
                                (*maf_id == NAND_MFR_SAMSUNG ||
                                 *maf_id == NAND_MFR_HYNIX ||
                                 *maf_id == NAND_MFR_TOSHIBA ||
-                                *maf_id == NAND_MFR_AMD)) ||
+                                *maf_id == NAND_MFR_AMD ||
+                                *maf_id == NAND_MFR_MACRONIX)) ||
                        (mtd->writesize == 2048 &&
                         *maf_id == NAND_MFR_MICRON))
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
index 69148ae3bf58aaf806be617894725eb72d1483f1..20a112f591fe3f67347bb206e1aae480d5eeaac6 100644 (file)
@@ -201,7 +201,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
                        from += marker_len;
                        marker_len = 0;
                }
-               res = mtd->read(mtd, from, len, &retlen, buf);
+               res = mtd_read(mtd, from, len, &retlen, buf);
                if (res < 0) {
                        if (mtd_is_eccerr(res)) {
                                pr_info("nand_bbt: ECC error in BBT at "
@@ -298,7 +298,7 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
        if (td->options & NAND_BBT_VERSION)
                len++;
 
-       return mtd->read(mtd, offs, len, &retlen, buf);
+       return mtd_read(mtd, offs, len, &retlen, buf);
 }
 
 /* Scan read raw data from flash */
@@ -317,7 +317,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
                ops.len = min(len, (size_t)mtd->writesize);
                ops.oobbuf = buf + ops.len;
 
-               res = mtd->read_oob(mtd, offs, &ops);
+               res = mtd_read_oob(mtd, offs, &ops);
 
                if (res)
                        return res;
@@ -350,7 +350,7 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.oobbuf = oob;
        ops.len = len;
 
-       return mtd->write_oob(mtd, offs, &ops);
+       return mtd_write_oob(mtd, offs, &ops);
 }
 
 static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -434,7 +434,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
                 * Read the full oob until read_oob is fixed to handle single
                 * byte reads for 16 bit buswidth.
                 */
-               ret = mtd->read_oob(mtd, offs, &ops);
+               ret = mtd_read_oob(mtd, offs, &ops);
                /* Ignore ECC errors when checking for BBM */
                if (ret && !mtd_is_bitflip_or_eccerr(ret))
                        return ret;
@@ -756,7 +756,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
                        /* Make it block aligned */
                        to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
                        len = 1 << this->bbt_erase_shift;
-                       res = mtd->read(mtd, to, len, &retlen, buf);
+                       res = mtd_read(mtd, to, len, &retlen, buf);
                        if (res < 0) {
                                if (retlen != len) {
                                        pr_info("nand_bbt: error reading block "
@@ -769,7 +769,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
                        /* Read oob data */
                        ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
                        ops.oobbuf = &buf[len];
-                       res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+                       res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
                        if (res < 0 || ops.oobretlen != ops.ooblen)
                                goto outerr;
 
index 00cf1b0d60531a2699382fbdd5651ae11e956683..af4fe8ca7b5ef7fbdfb25bb2553d8af4f61f8d29 100644 (file)
@@ -73,11 +73,12 @@ struct nand_flash_dev nand_flash_ids[] = {
 #define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
-       /*512 Megabit */
+       /* 512 Megabit */
        {"NAND 64MiB 1,8V 8-bit",       0xA2, 0,  64, 0, LP_OPTIONS},
        {"NAND 64MiB 1,8V 8-bit",       0xA0, 0,  64, 0, LP_OPTIONS},
        {"NAND 64MiB 3,3V 8-bit",       0xF2, 0,  64, 0, LP_OPTIONS},
        {"NAND 64MiB 3,3V 8-bit",       0xD0, 0,  64, 0, LP_OPTIONS},
+       {"NAND 64MiB 3,3V 8-bit",       0xF0, 0,  64, 0, LP_OPTIONS},
        {"NAND 64MiB 1,8V 16-bit",      0xB2, 0,  64, 0, LP_OPTIONS16},
        {"NAND 64MiB 1,8V 16-bit",      0xB0, 0,  64, 0, LP_OPTIONS16},
        {"NAND 64MiB 3,3V 16-bit",      0xC2, 0,  64, 0, LP_OPTIONS16},
@@ -176,6 +177,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
        {NAND_MFR_HYNIX, "Hynix"},
        {NAND_MFR_MICRON, "Micron"},
        {NAND_MFR_AMD, "AMD"},
+       {NAND_MFR_MACRONIX, "Macronix"},
        {0x0, "Unknown"}
 };
 
index 34c03be7730105c3a26867637700f8c0ade83219..261f478f8cc37944e3365ce8b0219ec073bfcddc 100644 (file)
@@ -737,7 +737,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
                        return -EINVAL;
                }
                offset = erase_block_no * ns->geom.secsz;
-               if (mtd->block_markbad(mtd, offset)) {
+               if (mtd_block_markbad(mtd, offset)) {
                        NS_ERR("invalid badblocks.\n");
                        return -EINVAL;
                }
index f8aacf48ecddf6c12a63a10026e8b948d5f0a8e2..ec688548c880ef67d1cf568e6f568c678099232f 100644 (file)
@@ -294,18 +294,7 @@ static struct platform_driver ndfc_driver = {
        .remove = __devexit_p(ndfc_remove),
 };
 
-static int __init ndfc_nand_init(void)
-{
-       return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
-       platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
index b463ecfb4c1a248713073ab019efed1af2f3d538..a86aa812ca13acf5a1d4be2794f72b41987a1429 100644 (file)
@@ -201,7 +201,7 @@ static int nomadik_nand_suspend(struct device *dev)
        struct nomadik_nand_host *host = dev_get_drvdata(dev);
        int ret = 0;
        if (host)
-               ret = host->mtd.suspend(&host->mtd);
+               ret = mtd_suspend(&host->mtd);
        return ret;
 }
 
@@ -209,7 +209,7 @@ static int nomadik_nand_resume(struct device *dev)
 {
        struct nomadik_nand_host *host = dev_get_drvdata(dev);
        if (host)
-               host->mtd.resume(&host->mtd);
+               mtd_resume(&host->mtd);
        return 0;
 }
 
@@ -228,19 +228,7 @@ static struct platform_driver nomadik_nand_driver = {
        },
 };
 
-static int __init nand_nomadik_init(void)
-{
-       pr_info("Nomadik NAND driver\n");
-       return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
-       platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
+module_platform_driver(nomadik_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
index fa8faedfad6ee086d15bad4d249b3b0bd0874991..8febe46e1105e9aad7212dd442e4160d855fe11d 100644 (file)
@@ -364,18 +364,7 @@ static struct platform_driver nuc900_nand_driver = {
        },
 };
 
-static int __init nuc900_nand_init(void)
-{
-       return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
-       platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
index f745f00f3167d455f3d802a0b5ea9dc82a66b617..b3a883e2a22f63942f83e245304f2de82fcdf116 100644 (file)
@@ -1145,20 +1145,7 @@ static struct platform_driver omap_nand_driver = {
        },
 };
 
-static int __init omap_nand_init(void)
-{
-       pr_info("%s driver initializing\n", DRIVER_NAME);
-
-       return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
-       platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
 
 MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
index a97264ececdbe2278edf20b11a2d072bd38feba4..974dbf8251c928842fe528c15777e4cd12efdaa8 100644 (file)
@@ -230,17 +230,7 @@ static struct platform_driver pasemi_nand_driver =
        .remove         = pasemi_nand_remove,
 };
 
-static int __init pasemi_nand_init(void)
-{
-       return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
-       platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
index ea8e1234e0e25895ed2a731ae1a0225d616468fa..7f2da6953357f80f06bfab9404d411db53f6ef05 100644 (file)
@@ -148,18 +148,7 @@ static struct platform_driver plat_nand_driver = {
        },
 };
 
-static int __init plat_nand_init(void)
-{
-       return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
-       platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Vitaly Wool");
index 9eb7f879969e31a2e5bd987bd27a5295f992516e..8544d6bf50a0ae837465d28e60bb2bf0e72edf73 100644 (file)
@@ -1258,7 +1258,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
 
        for (cs = 0; cs < pdata->num_cs; cs++) {
                mtd = info->host[cs]->mtd;
-               mtd->suspend(mtd);
+               mtd_suspend(mtd);
        }
 
        return 0;
@@ -1291,7 +1291,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
        nand_writel(info, NDSR, NDSR_MASK);
        for (cs = 0; cs < pdata->num_cs; cs++) {
                mtd = info->host[cs]->mtd;
-               mtd->resume(mtd);
+               mtd_resume(mtd);
        }
 
        return 0;
@@ -1311,17 +1311,7 @@ static struct platform_driver pxa3xx_nand_driver = {
        .resume         = pxa3xx_nand_resume,
 };
 
-static int __init pxa3xx_nand_init(void)
-{
-       return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
-       platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PXA3xx NAND controller driver");
index 619d2a504788b050aa46cc48e0cd13c31588d921..b175c0fd8b9378009ecbcbf2ad6ad3cbf9b6f8a7 100644 (file)
@@ -230,17 +230,7 @@ static struct platform_driver sharpsl_nand_driver = {
        .remove         = __devexit_p(sharpsl_nand_remove),
 };
 
-static int __init sharpsl_nand_init(void)
-{
-       return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
-       platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
index 32ae5af7444f6a28d26e02a7ff6b53efc7e31bde..774c3c26671379a30ef8c3f4e0aec1c57ee3efec 100644 (file)
@@ -55,7 +55,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
        ops.datbuf = NULL;
 
 
-       ret = mtd->write_oob(mtd, ofs, &ops);
+       ret = mtd_write_oob(mtd, ofs, &ops);
        if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
                printk(KERN_NOTICE
                        "sm_common: can't mark sector at %i as bad\n",
index 0fb24f9c232772dac3c307bff2c7eb7a2d690c5f..e02b08bcf0c044e7cd845b046f81d013ec8140cc 100644 (file)
@@ -273,18 +273,7 @@ static struct platform_driver socrates_nand_driver = {
        .remove         = __devexit_p(socrates_nand_remove),
 };
 
-static int __init socrates_nand_init(void)
-{
-       return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
-       platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ilya Yanok");
index beebd95f76907de7e32102d0dd9a7593be99d0a7..6caa0cd9d6a7c23394d815516f321ae32ed97983 100644 (file)
@@ -533,18 +533,7 @@ static struct platform_driver tmio_driver = {
        .resume         = tmio_resume,
 };
 
-static int __init tmio_init(void)
-{
-       return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
-       platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
index ace46fdaef58cfb3b51fc9c981e637bb7e0fd2a4..c7c4f1d11c77f2f9cb1a41700f58549377d5ea64 100644 (file)
@@ -298,11 +298,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
        drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;
-       if (!devm_request_mem_region(&dev->dev, res->start,
-                                    resource_size(res), dev_name(&dev->dev)))
-               return -EBUSY;
-       drvdata->base = devm_ioremap(&dev->dev, res->start,
-                                    resource_size(res));
+       drvdata->base = devm_request_and_ioremap(&dev->dev, res);
        if (!drvdata->base)
                return -EBUSY;
 
index cda77b562ad43da9efd14f1cfb1ab4219583e9d8..a75382aff5f68d17f419307b70eb4427f7c07507 100644 (file)
@@ -56,7 +56,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
        if (memcmp(mtd->name, "DiskOnChip", 10))
                return;
 
-       if (!mtd->block_isbad) {
+       if (!mtd_can_have_bb(mtd)) {
                printk(KERN_ERR
 "NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
 "Please use the new diskonchip driver under the NAND subsystem.\n");
@@ -153,7 +153,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.oobbuf = buf;
        ops.datbuf = NULL;
 
-       res = mtd->read_oob(mtd, offs & ~mask, &ops);
+       res = mtd_read_oob(mtd, offs & ~mask, &ops);
        *retlen = ops.oobretlen;
        return res;
 }
@@ -174,7 +174,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.oobbuf = buf;
        ops.datbuf = NULL;
 
-       res = mtd->write_oob(mtd, offs & ~mask, &ops);
+       res = mtd_write_oob(mtd, offs & ~mask, &ops);
        *retlen = ops.oobretlen;
        return res;
 }
@@ -198,7 +198,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
        ops.datbuf = buf;
        ops.len = len;
 
-       res = mtd->write_oob(mtd, offs & ~mask, &ops);
+       res = mtd_write_oob(mtd, offs & ~mask, &ops);
        *retlen = ops.retlen;
        return res;
 }
@@ -423,12 +423,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
                if (BlockMap[block] == BLOCK_NIL)
                        continue;
 
-               ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
-                               512, &retlen, movebuf);
+               ret = mtd_read(mtd,
+                              (nftl->EraseSize * BlockMap[block]) + (block * 512),
+                              512,
+                              &retlen,
+                              movebuf);
                if (ret < 0 && !mtd_is_bitflip(ret)) {
-                       ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
-                                       + (block * 512), 512, &retlen,
-                                       movebuf);
+                       ret = mtd_read(mtd,
+                                      (nftl->EraseSize * BlockMap[block]) + (block * 512),
+                                      512,
+                                      &retlen,
+                                      movebuf);
                        if (ret != -EIO)
                                printk("Error went away on retry.\n");
                }
@@ -771,7 +776,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
        } else {
                loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
                size_t retlen;
-               int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
+               int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
 
                if (res < 0 && !mtd_is_bitflip(res))
                        return -EIO;
index ac4092591aea035068e169900b099ca8121c2185..51b9d6af307f616193ebc006b492a5fecedd535b 100644 (file)
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
 
                /* Check for ANAND header first. Then can whinge if it's found but later
                   checks fail */
-               ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
-                               &retlen, buf);
+               ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+                              &retlen, buf);
                /* We ignore ret in case the ECC of the MediaHeader is invalid
                   (which is apparently acceptable) */
                if (retlen != SECTORSIZE) {
@@ -242,7 +242,8 @@ The new DiskOnChip driver already scanned the bad block table.  Just query it.
                        if (buf[i & (SECTORSIZE - 1)] != 0xff)
                                nftl->ReplUnitTable[i] = BLOCK_RESERVED;
 #endif
-                       if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+                       if (mtd_block_isbad(nftl->mbd.mtd,
+                                           i * nftl->EraseSize))
                                nftl->ReplUnitTable[i] = BLOCK_RESERVED;
                }
 
@@ -274,7 +275,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
        int i;
 
        for (i = 0; i < len; i += SECTORSIZE) {
-               if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+               if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
                        return -1;
                if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
                        return -1;
@@ -326,7 +327,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
        instr->mtd = nftl->mbd.mtd;
        instr->addr = block * nftl->EraseSize;
        instr->len = nftl->EraseSize;
-       mtd->erase(mtd, instr);
+       mtd_erase(mtd, instr);
 
        if (instr->state == MTD_ERASE_FAILED) {
                printk("Error while formatting block %d\n", block);
@@ -355,7 +356,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
 fail:
        /* could not format, update the bad block table (caller is responsible
           for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
-       nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+       mtd_block_markbad(nftl->mbd.mtd, instr->addr);
        return -1;
 }
 
index 7813095264a56192dbd13c0948da04cfdfd5f82f..0ccd5bff254459323827bd1f54b69d65b990c510 100644 (file)
@@ -115,21 +115,9 @@ static struct platform_driver generic_onenand_driver = {
        .remove         = __devexit_p(generic_onenand_remove),
 };
 
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
-static int __init generic_onenand_init(void)
-{
-       return platform_driver_register(&generic_onenand_driver);
-}
-
-static void __exit generic_onenand_exit(void)
-{
-       platform_driver_unregister(&generic_onenand_driver);
-}
-
-module_init(generic_onenand_init);
-module_exit(generic_onenand_exit);
+module_platform_driver(generic_onenand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
 MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
index a8394730b4b6fc7e9e84fcdd919e9a09c0b669da..a061bc163da2e128171bdc9fce6d146e6163b411 100644 (file)
@@ -2633,7 +2633,6 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
  */
 static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-       struct onenand_chip *this = mtd->priv;
        int ret;
 
        ret = onenand_block_isbad(mtd, ofs);
@@ -2645,7 +2644,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
        }
 
        onenand_get_device(mtd, FL_WRITING);
-       ret = this->block_markbad(mtd, ofs);
+       ret = mtd_block_markbad(mtd, ofs);
        onenand_release_device(mtd);
        return ret;
 }
index 5474547eafc2fc271b2000bcf5c146a18b4a8372..fa1ee43f735b5bb8ec97d5c6861525f8c4b5757c 100644 (file)
@@ -1133,18 +1133,7 @@ static struct platform_driver s3c_onenand_driver = {
        .remove         = __devexit_p(s3c_onenand_remove),
 };
 
-static int __init s3c_onenand_init(void)
-{
-       return platform_driver_register(&s3c_onenand_driver);
-}
-
-static void __exit s3c_onenand_exit(void)
-{
-       platform_driver_unregister(&s3c_onenand_driver);
-}
-
-module_init(s3c_onenand_init);
-module_exit(s3c_onenand_exit);
+module_platform_driver(s3c_onenand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
index e366b1d84eade1685821dcf5021427e993656842..48970c14beffd911fd154e53dd3b56690f03d4f9 100644 (file)
@@ -78,8 +78,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
 
        if ( directory < 0 ) {
                offset = master->size + directory * master->erasesize;
-               while (master->block_isbad && 
-                      master->block_isbad(master, offset)) {
+               while (mtd_can_have_bb(master) &&
+                      mtd_block_isbad(master, offset)) {
                        if (!offset) {
                        nogood:
                                printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +89,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
                }
        } else {
                offset = directory * master->erasesize;
-               while (master->block_isbad && 
-                      master->block_isbad(master, offset)) {
+               while (mtd_can_have_bb(master) &&
+                      mtd_block_isbad(master, offset)) {
                        offset += master->erasesize;
                        if (offset == master->size)
                                goto nogood;
@@ -104,8 +104,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
        printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
               master->name, offset);
 
-       ret = master->read(master, offset,
-                          master->erasesize, &retlen, (void *)buf);
+       ret = mtd_read(master, offset, master->erasesize, &retlen,
+                      (void *)buf);
 
        if (ret)
                goto out;
index 73ae217a4252a1b386c7c9b3b820dece4c3e65bd..233b946e5d66d95a2cfbabb19985e5f8a75efb13 100644 (file)
@@ -200,9 +200,9 @@ static int scan_header(struct partition *part)
                part->sector_map[i] = -1;
 
        for (i=0, blocks_found=0; i<part->total_blocks; i++) {
-               rc = part->mbd.mtd->read(part->mbd.mtd,
-                               i * part->block_size, part->header_size,
-                               &retlen, (u_char*)part->header_cache);
+               rc = mtd_read(part->mbd.mtd, i * part->block_size,
+                             part->header_size, &retlen,
+                             (u_char *)part->header_cache);
 
                if (!rc && retlen != part->header_size)
                        rc = -EIO;
@@ -250,8 +250,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
 
        addr = part->sector_map[sector];
        if (addr != -1) {
-               rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
-                                               &retlen, (u_char*)buf);
+               rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+                             (u_char *)buf);
                if (!rc && retlen != SECTOR_SIZE)
                        rc = -EIO;
 
@@ -304,9 +304,8 @@ static void erase_callback(struct erase_info *erase)
        part->blocks[i].used_sectors = 0;
        part->blocks[i].erases++;
 
-       rc = part->mbd.mtd->write(part->mbd.mtd,
-               part->blocks[i].offset, sizeof(magic), &retlen,
-               (u_char*)&magic);
+       rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+                      &retlen, (u_char *)&magic);
 
        if (!rc && retlen != sizeof(magic))
                rc = -EIO;
@@ -342,7 +341,7 @@ static int erase_block(struct partition *part, int block)
        part->blocks[block].state = BLOCK_ERASING;
        part->blocks[block].free_sectors = 0;
 
-       rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+       rc = mtd_erase(part->mbd.mtd, erase);
 
        if (rc) {
                printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -372,9 +371,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
        if (!map)
                goto err2;
 
-       rc = part->mbd.mtd->read(part->mbd.mtd,
-               part->blocks[block_no].offset, part->header_size,
-               &retlen, (u_char*)map);
+       rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+                     part->header_size, &retlen, (u_char *)map);
 
        if (!rc && retlen != part->header_size)
                rc = -EIO;
@@ -413,8 +411,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
                        }
                        continue;
                }
-               rc = part->mbd.mtd->read(part->mbd.mtd, addr,
-                       SECTOR_SIZE, &retlen, sector_data);
+               rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+                             sector_data);
 
                if (!rc && retlen != SECTOR_SIZE)
                        rc = -EIO;
@@ -450,8 +448,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
        int rc;
 
        /* we have a race if sync doesn't exist */
-       if (part->mbd.mtd->sync)
-               part->mbd.mtd->sync(part->mbd.mtd);
+       mtd_sync(part->mbd.mtd);
 
        score = 0x7fffffff; /* MAX_INT */
        best_block = -1;
@@ -563,8 +560,9 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
                }
        }
 
-       rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
-               part->header_size, &retlen, (u_char*)part->header_cache);
+       rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+                     part->header_size, &retlen,
+                     (u_char *)part->header_cache);
 
        if (!rc && retlen != part->header_size)
                rc = -EIO;
@@ -595,8 +593,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
 
        addr = part->blocks[block].offset +
                        (HEADER_MAP_OFFSET + offset) * sizeof(u16);
-       rc = part->mbd.mtd->write(part->mbd.mtd, addr,
-               sizeof(del), &retlen, (u_char*)&del);
+       rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+                      (u_char *)&del);
 
        if (!rc && retlen != sizeof(del))
                rc = -EIO;
@@ -668,8 +666,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
 
        addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
                block->offset;
-       rc = part->mbd.mtd->write(part->mbd.mtd,
-               addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+       rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+                      (u_char *)buf);
 
        if (!rc && retlen != SECTOR_SIZE)
                rc = -EIO;
@@ -688,8 +686,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
        part->header_cache[i + HEADER_MAP_OFFSET] = entry;
 
        addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
-       rc = part->mbd.mtd->write(part->mbd.mtd, addr,
-                       sizeof(entry), &retlen, (u_char*)&entry);
+       rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+                      (u_char *)&entry);
 
        if (!rc && retlen != sizeof(entry))
                rc = -EIO;
index fddb714e323cf3ea577b40c32e4abbb2aced5f29..072ed5970e2f3dacf5a1e5d654a9f9b54183d8e3 100644 (file)
@@ -25,7 +25,7 @@
 struct workqueue_struct *cache_flush_workqueue;
 
 static int cache_timeout = 1000;
-module_param(cache_timeout, bool, S_IRUGO);
+module_param(cache_timeout, int, S_IRUGO);
 MODULE_PARM_DESC(cache_timeout,
        "Timeout (in ms) for cache flush (1000 ms default");
 
@@ -278,7 +278,7 @@ again:
 
        /* Unfortunately, oob read will _always_ succeed,
                despite card removal..... */
-       ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+       ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
 
        /* Test for unknown errors */
        if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
@@ -343,7 +343,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
        ops.ooblen = SM_OOB_SIZE;
        ops.oobbuf = (void *)oob;
 
-       ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+       ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
 
        /* Now we assume that hardware will catch write bitflip errors */
        /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
@@ -479,7 +479,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
                return -EIO;
        }
 
-       if (mtd->erase(mtd, &erase)) {
+       if (mtd_erase(mtd, &erase)) {
                sm_printk("erase of block %d in zone %d failed",
                                                        block, zone_num);
                goto error;
@@ -645,8 +645,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
        if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
                return -ENODEV;
 
-       /* We use these functions for IO */
-       if (!mtd->read_oob || !mtd->write_oob)
+       /* We use OOB */
+       if (!mtd_has_oob(mtd))
                return -ENODEV;
 
        /* Find geometry information */
index 976e3d28b962de39870e70be8bfd3d3c29c70bff..ab2a52a039c3eef994e7e4a53645d34e1a059eb2 100644 (file)
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
         * is not SSFDC formatted
         */
        for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
-               if (!mtd->block_isbad(mtd, offset)) {
-                       ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
-                               sect_buf);
+               if (!mtd_block_isbad(mtd, offset)) {
+                       ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+                                      sect_buf);
 
                        /* CIS pattern match on the sector buffer */
                        if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -156,7 +156,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
        size_t retlen;
        loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
 
-       ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+       ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
        if (ret < 0 || retlen != SECTOR_SIZE)
                return -1;
 
@@ -175,7 +175,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
        ops.oobbuf = buf;
        ops.datbuf = NULL;
 
-       ret = mtd->read_oob(mtd, offs, &ops);
+       ret = mtd_read_oob(mtd, offs, &ops);
        if (ret < 0 || ops.oobretlen != OOB_SIZE)
                return -1;
 
@@ -255,7 +255,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
        for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
                        phys_block++) {
                offset = (unsigned long)phys_block * ssfdc->erase_size;
-               if (mtd->block_isbad(mtd, offset))
+               if (mtd_block_isbad(mtd, offset))
                        continue;       /* skip bad blocks */
 
                ret = read_raw_oob(mtd, offset, oob_buf);
index 933f7e5f32d301a90f7256750b03c8a1542a9609..ed9b62827f1bee15e6f5236e4c47f92cae6bfd5c 100644 (file)
@@ -78,7 +78,7 @@ static int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -139,7 +139,7 @@ static int write_eraseblock(int ebnum)
                ops.ooboffs   = use_offset;
                ops.datbuf    = NULL;
                ops.oobbuf    = writebuf;
-               err = mtd->write_oob(mtd, addr, &ops);
+               err = mtd_write_oob(mtd, addr, &ops);
                if (err || ops.oobretlen != use_len) {
                        printk(PRINT_PREF "error: writeoob failed at %#llx\n",
                               (long long)addr);
@@ -192,7 +192,7 @@ static int verify_eraseblock(int ebnum)
                ops.ooboffs   = use_offset;
                ops.datbuf    = NULL;
                ops.oobbuf    = readbuf;
-               err = mtd->read_oob(mtd, addr, &ops);
+               err = mtd_read_oob(mtd, addr, &ops);
                if (err || ops.oobretlen != use_len) {
                        printk(PRINT_PREF "error: readoob failed at %#llx\n",
                               (long long)addr);
@@ -219,7 +219,7 @@ static int verify_eraseblock(int ebnum)
                        ops.ooboffs   = 0;
                        ops.datbuf    = NULL;
                        ops.oobbuf    = readbuf;
-                       err = mtd->read_oob(mtd, addr, &ops);
+                       err = mtd_read_oob(mtd, addr, &ops);
                        if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
                                printk(PRINT_PREF "error: readoob failed at "
                                       "%#llx\n", (long long)addr);
@@ -284,7 +284,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
        ops.ooboffs   = 0;
        ops.datbuf    = NULL;
        ops.oobbuf    = readbuf;
-       err = mtd->read_oob(mtd, addr, &ops);
+       err = mtd_read_oob(mtd, addr, &ops);
        if (err || ops.oobretlen != len) {
                printk(PRINT_PREF "error: readoob failed at %#llx\n",
                       (long long)addr);
@@ -329,7 +329,7 @@ static int is_block_bad(int ebnum)
        int ret;
        loff_t addr = ebnum * mtd->erasesize;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
@@ -524,7 +524,7 @@ static int __init mtd_oobtest_init(void)
        ops.oobbuf    = writebuf;
        printk(PRINT_PREF "attempting to start write past end of OOB\n");
        printk(PRINT_PREF "an error is expected...\n");
-       err = mtd->write_oob(mtd, addr0, &ops);
+       err = mtd_write_oob(mtd, addr0, &ops);
        if (err) {
                printk(PRINT_PREF "error occurred as expected\n");
                err = 0;
@@ -544,7 +544,7 @@ static int __init mtd_oobtest_init(void)
        ops.oobbuf    = readbuf;
        printk(PRINT_PREF "attempting to start read past end of OOB\n");
        printk(PRINT_PREF "an error is expected...\n");
-       err = mtd->read_oob(mtd, addr0, &ops);
+       err = mtd_read_oob(mtd, addr0, &ops);
        if (err) {
                printk(PRINT_PREF "error occurred as expected\n");
                err = 0;
@@ -568,7 +568,7 @@ static int __init mtd_oobtest_init(void)
                ops.oobbuf    = writebuf;
                printk(PRINT_PREF "attempting to write past end of device\n");
                printk(PRINT_PREF "an error is expected...\n");
-               err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+               err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
                if (err) {
                        printk(PRINT_PREF "error occurred as expected\n");
                        err = 0;
@@ -588,7 +588,7 @@ static int __init mtd_oobtest_init(void)
                ops.oobbuf    = readbuf;
                printk(PRINT_PREF "attempting to read past end of device\n");
                printk(PRINT_PREF "an error is expected...\n");
-               err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+               err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
                if (err) {
                        printk(PRINT_PREF "error occurred as expected\n");
                        err = 0;
@@ -612,7 +612,7 @@ static int __init mtd_oobtest_init(void)
                ops.oobbuf    = writebuf;
                printk(PRINT_PREF "attempting to write past end of device\n");
                printk(PRINT_PREF "an error is expected...\n");
-               err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+               err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
                if (err) {
                        printk(PRINT_PREF "error occurred as expected\n");
                        err = 0;
@@ -632,7 +632,7 @@ static int __init mtd_oobtest_init(void)
                ops.oobbuf    = readbuf;
                printk(PRINT_PREF "attempting to read past end of device\n");
                printk(PRINT_PREF "an error is expected...\n");
-               err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+               err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
                if (err) {
                        printk(PRINT_PREF "error occurred as expected\n");
                        err = 0;
@@ -670,7 +670,7 @@ static int __init mtd_oobtest_init(void)
                        ops.ooboffs   = 0;
                        ops.datbuf    = NULL;
                        ops.oobbuf    = writebuf;
-                       err = mtd->write_oob(mtd, addr, &ops);
+                       err = mtd_write_oob(mtd, addr, &ops);
                        if (err)
                                goto out;
                        if (i % 256 == 0)
@@ -698,7 +698,7 @@ static int __init mtd_oobtest_init(void)
                ops.ooboffs   = 0;
                ops.datbuf    = NULL;
                ops.oobbuf    = readbuf;
-               err = mtd->read_oob(mtd, addr, &ops);
+               err = mtd_read_oob(mtd, addr, &ops);
                if (err)
                        goto out;
                if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
index afafb6935fd0870707d13f9b7ea12cfbbb69af56..252ddb092fb2312f96db0e82abf573fe22137318 100644 (file)
@@ -77,7 +77,7 @@ static int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -95,12 +95,12 @@ static int erase_eraseblock(int ebnum)
 static int write_eraseblock(int ebnum)
 {
        int err = 0;
-       size_t written = 0;
+       size_t written;
        loff_t addr = ebnum * mtd->erasesize;
 
        set_random_data(writebuf, mtd->erasesize);
        cond_resched();
-       err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
+       err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
        if (err || written != mtd->erasesize)
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr);
@@ -111,7 +111,7 @@ static int write_eraseblock(int ebnum)
 static int verify_eraseblock(int ebnum)
 {
        uint32_t j;
-       size_t read = 0;
+       size_t read;
        int err = 0, i;
        loff_t addr0, addrn;
        loff_t addr = ebnum * mtd->erasesize;
@@ -127,7 +127,7 @@ static int verify_eraseblock(int ebnum)
        set_random_data(writebuf, mtd->erasesize);
        for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
                /* Do a read to set the internal dataRAMs to different data */
-               err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+               err = mtd_read(mtd, addr0, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -135,7 +135,7 @@ static int verify_eraseblock(int ebnum)
                               (long long)addr0);
                        return err;
                }
-               err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+               err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -144,8 +144,7 @@ static int verify_eraseblock(int ebnum)
                        return err;
                }
                memset(twopages, 0, bufsize);
-               read = 0;
-               err = mtd->read(mtd, addr, bufsize, &read, twopages);
+               err = mtd_read(mtd, addr, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -163,7 +162,7 @@ static int verify_eraseblock(int ebnum)
        if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
                unsigned long oldnext = next;
                /* Do a read to set the internal dataRAMs to different data */
-               err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+               err = mtd_read(mtd, addr0, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -171,7 +170,7 @@ static int verify_eraseblock(int ebnum)
                               (long long)addr0);
                        return err;
                }
-               err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+               err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -180,8 +179,7 @@ static int verify_eraseblock(int ebnum)
                        return err;
                }
                memset(twopages, 0, bufsize);
-               read = 0;
-               err = mtd->read(mtd, addr, bufsize, &read, twopages);
+               err = mtd_read(mtd, addr, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
                        err = 0;
                if (err || read != bufsize) {
@@ -203,7 +201,7 @@ static int verify_eraseblock(int ebnum)
 
 static int crosstest(void)
 {
-       size_t read = 0;
+       size_t read;
        int err = 0, i;
        loff_t addr, addr0, addrn;
        unsigned char *pp1, *pp2, *pp3, *pp4;
@@ -228,9 +226,8 @@ static int crosstest(void)
                addrn -= mtd->erasesize;
 
        /* Read 2nd-to-last page to pp1 */
-       read = 0;
        addr = addrn - pgsize - pgsize;
-       err = mtd->read(mtd, addr, pgsize, &read, pp1);
+       err = mtd_read(mtd, addr, pgsize, &read, pp1);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -241,9 +238,8 @@ static int crosstest(void)
        }
 
        /* Read 3rd-to-last page to pp1 */
-       read = 0;
        addr = addrn - pgsize - pgsize - pgsize;
-       err = mtd->read(mtd, addr, pgsize, &read, pp1);
+       err = mtd_read(mtd, addr, pgsize, &read, pp1);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -254,10 +250,9 @@ static int crosstest(void)
        }
 
        /* Read first page to pp2 */
-       read = 0;
        addr = addr0;
        printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
-       err = mtd->read(mtd, addr, pgsize, &read, pp2);
+       err = mtd_read(mtd, addr, pgsize, &read, pp2);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -268,10 +263,9 @@ static int crosstest(void)
        }
 
        /* Read last page to pp3 */
-       read = 0;
        addr = addrn - pgsize;
        printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
-       err = mtd->read(mtd, addr, pgsize, &read, pp3);
+       err = mtd_read(mtd, addr, pgsize, &read, pp3);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -282,10 +276,9 @@ static int crosstest(void)
        }
 
        /* Read first page again to pp4 */
-       read = 0;
        addr = addr0;
        printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
-       err = mtd->read(mtd, addr, pgsize, &read, pp4);
+       err = mtd_read(mtd, addr, pgsize, &read, pp4);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -309,7 +302,7 @@ static int crosstest(void)
 
 static int erasecrosstest(void)
 {
-       size_t read = 0, written = 0;
+       size_t read, written;
        int err = 0, i, ebnum, ebnum2;
        loff_t addr0;
        char *readbuf = twopages;
@@ -335,7 +328,7 @@ static int erasecrosstest(void)
        printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
        set_random_data(writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
-       err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr0);
@@ -344,7 +337,7 @@ static int erasecrosstest(void)
 
        printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
        memset(readbuf, 0, pgsize);
-       err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+       err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -368,7 +361,7 @@ static int erasecrosstest(void)
        printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
        set_random_data(writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
-       err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr0);
@@ -382,7 +375,7 @@ static int erasecrosstest(void)
 
        printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
        memset(readbuf, 0, pgsize);
-       err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+       err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -405,7 +398,7 @@ static int erasecrosstest(void)
 
 static int erasetest(void)
 {
-       size_t read = 0, written = 0;
+       size_t read, written;
        int err = 0, i, ebnum, ok = 1;
        loff_t addr0;
 
@@ -425,7 +418,7 @@ static int erasetest(void)
 
        printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
        set_random_data(writebuf, pgsize);
-       err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr0);
@@ -438,7 +431,7 @@ static int erasetest(void)
                return err;
 
        printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
-       err = mtd->read(mtd, addr0, pgsize, &read, twopages);
+       err = mtd_read(mtd, addr0, pgsize, &read, twopages);
        if (mtd_is_bitflip(err))
                err = 0;
        if (err || read != pgsize) {
@@ -469,7 +462,7 @@ static int is_block_bad(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        int ret;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
index 550fe51225a731bcc434ac04ef8890588521b0a6..121aba189cec6de33297a2afb88ecd7892d329f5 100644 (file)
@@ -44,7 +44,7 @@ static int pgcnt;
 
 static int read_eraseblock_by_page(int ebnum)
 {
-       size_t read = 0;
+       size_t read;
        int i, ret, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
@@ -52,7 +52,7 @@ static int read_eraseblock_by_page(int ebnum)
 
        for (i = 0; i < pgcnt; i++) {
                memset(buf, 0 , pgcnt);
-               ret = mtd->read(mtd, addr, pgsize, &read, buf);
+               ret = mtd_read(mtd, addr, pgsize, &read, buf);
                if (ret == -EUCLEAN)
                        ret = 0;
                if (ret || read != pgsize) {
@@ -74,7 +74,7 @@ static int read_eraseblock_by_page(int ebnum)
                        ops.ooboffs   = 0;
                        ops.datbuf    = NULL;
                        ops.oobbuf    = oobbuf;
-                       ret = mtd->read_oob(mtd, addr, &ops);
+                       ret = mtd_read_oob(mtd, addr, &ops);
                        if ((ret && !mtd_is_bitflip(ret)) ||
                                        ops.oobretlen != mtd->oobsize) {
                                printk(PRINT_PREF "error: read oob failed at "
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        int ret;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
@@ -148,8 +148,7 @@ static int scan_for_bad_eraseblocks(void)
                return -ENOMEM;
        }
 
-       /* NOR flash does not implement block_isbad */
-       if (mtd->block_isbad == NULL)
+       if (!mtd_can_have_bb(mtd))
                return 0;
 
        printk(PRINT_PREF "scanning for bad eraseblocks\n");
index 493b367bdd35211af2db7273d981398a938dd039..2aec4f3b72be3d9975ce92d83b1fdd063872a95b 100644 (file)
@@ -79,7 +79,7 @@ static int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -105,7 +105,7 @@ static int multiblock_erase(int ebnum, int blocks)
        ei.addr = addr;
        ei.len  = mtd->erasesize * blocks;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
                       err, ebnum, blocks);
@@ -139,11 +139,11 @@ static int erase_whole_device(void)
 
 static int write_eraseblock(int ebnum)
 {
-       size_t written = 0;
+       size_t written;
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
+       err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
        if (err || written != mtd->erasesize) {
                printk(PRINT_PREF "error: write failed at %#llx\n", addr);
                if (!err)
@@ -155,13 +155,13 @@ static int write_eraseblock(int ebnum)
 
 static int write_eraseblock_by_page(int ebnum)
 {
-       size_t written = 0;
+       size_t written;
        int i, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < pgcnt; i++) {
-               err = mtd->write(mtd, addr, pgsize, &written, buf);
+               err = mtd_write(mtd, addr, pgsize, &written, buf);
                if (err || written != pgsize) {
                        printk(PRINT_PREF "error: write failed at %#llx\n",
                               addr);
@@ -178,13 +178,13 @@ static int write_eraseblock_by_page(int ebnum)
 
 static int write_eraseblock_by_2pages(int ebnum)
 {
-       size_t written = 0, sz = pgsize * 2;
+       size_t written, sz = pgsize * 2;
        int i, n = pgcnt / 2, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < n; i++) {
-               err = mtd->write(mtd, addr, sz, &written, buf);
+               err = mtd_write(mtd, addr, sz, &written, buf);
                if (err || written != sz) {
                        printk(PRINT_PREF "error: write failed at %#llx\n",
                               addr);
@@ -196,7 +196,7 @@ static int write_eraseblock_by_2pages(int ebnum)
                buf += sz;
        }
        if (pgcnt % 2) {
-               err = mtd->write(mtd, addr, pgsize, &written, buf);
+               err = mtd_write(mtd, addr, pgsize, &written, buf);
                if (err || written != pgsize) {
                        printk(PRINT_PREF "error: write failed at %#llx\n",
                               addr);
@@ -210,11 +210,11 @@ static int write_eraseblock_by_2pages(int ebnum)
 
 static int read_eraseblock(int ebnum)
 {
-       size_t read = 0;
+       size_t read;
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
+       err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
        /* Ignore corrected ECC errors */
        if (mtd_is_bitflip(err))
                err = 0;
@@ -229,13 +229,13 @@ static int read_eraseblock(int ebnum)
 
 static int read_eraseblock_by_page(int ebnum)
 {
-       size_t read = 0;
+       size_t read;
        int i, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < pgcnt; i++) {
-               err = mtd->read(mtd, addr, pgsize, &read, buf);
+               err = mtd_read(mtd, addr, pgsize, &read, buf);
                /* Ignore corrected ECC errors */
                if (mtd_is_bitflip(err))
                        err = 0;
@@ -255,13 +255,13 @@ static int read_eraseblock_by_page(int ebnum)
 
 static int read_eraseblock_by_2pages(int ebnum)
 {
-       size_t read = 0, sz = pgsize * 2;
+       size_t read, sz = pgsize * 2;
        int i, n = pgcnt / 2, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < n; i++) {
-               err = mtd->read(mtd, addr, sz, &read, buf);
+               err = mtd_read(mtd, addr, sz, &read, buf);
                /* Ignore corrected ECC errors */
                if (mtd_is_bitflip(err))
                        err = 0;
@@ -276,7 +276,7 @@ static int read_eraseblock_by_2pages(int ebnum)
                buf += sz;
        }
        if (pgcnt % 2) {
-               err = mtd->read(mtd, addr, pgsize, &read, buf);
+               err = mtd_read(mtd, addr, pgsize, &read, buf);
                /* Ignore corrected ECC errors */
                if (mtd_is_bitflip(err))
                        err = 0;
@@ -296,7 +296,7 @@ static int is_block_bad(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        int ret;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
@@ -336,8 +336,7 @@ static int scan_for_bad_eraseblocks(void)
                return -ENOMEM;
        }
 
-       /* NOR flash does not implement block_isbad */
-       if (mtd->block_isbad == NULL)
+       if (!mtd_can_have_bb(mtd))
                goto out;
 
        printk(PRINT_PREF "scanning for bad eraseblocks\n");
index 52ffd9120e0ded0eaf45d262cb4b6f115aad0ada..7b33f22d0b583196ae0a7fb5ccd6563293e239af 100644 (file)
@@ -112,7 +112,7 @@ static int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (unlikely(err)) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        int ret;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
@@ -140,7 +140,7 @@ static int is_block_bad(int ebnum)
 
 static int do_read(void)
 {
-       size_t read = 0;
+       size_t read;
        int eb = rand_eb();
        int offs = rand_offs();
        int len = rand_len(offs), err;
@@ -153,7 +153,7 @@ static int do_read(void)
                        len = mtd->erasesize - offs;
        }
        addr = eb * mtd->erasesize + offs;
-       err = mtd->read(mtd, addr, len, &read, readbuf);
+       err = mtd_read(mtd, addr, len, &read, readbuf);
        if (mtd_is_bitflip(err))
                err = 0;
        if (unlikely(err || read != len)) {
@@ -169,7 +169,7 @@ static int do_read(void)
 static int do_write(void)
 {
        int eb = rand_eb(), offs, err, len;
-       size_t written = 0;
+       size_t written;
        loff_t addr;
 
        offs = offsets[eb];
@@ -192,7 +192,7 @@ static int do_write(void)
                }
        }
        addr = eb * mtd->erasesize + offs;
-       err = mtd->write(mtd, addr, len, &written, writebuf);
+       err = mtd_write(mtd, addr, len, &written, writebuf);
        if (unlikely(err || written != len)) {
                printk(PRINT_PREF "error: write failed at 0x%llx\n",
                       (long long)addr);
@@ -227,8 +227,7 @@ static int scan_for_bad_eraseblocks(void)
                return -ENOMEM;
        }
 
-       /* NOR flash does not implement block_isbad */
-       if (mtd->block_isbad == NULL)
+       if (!mtd_can_have_bb(mtd))
                return 0;
 
        printk(PRINT_PREF "scanning for bad eraseblocks\n");
@@ -284,6 +283,12 @@ static int __init mtd_stresstest_init(void)
               (unsigned long long)mtd->size, mtd->erasesize,
               pgsize, ebcnt, pgcnt, mtd->oobsize);
 
+       if (ebcnt < 2) {
+               printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+               err = -ENOSPC;
+               goto out_put_mtd;
+       }
+
        /* Read or write up 2 eraseblocks at a time */
        bufsize = mtd->erasesize * 2;
 
@@ -322,6 +327,7 @@ out:
        kfree(bbt);
        vfree(writebuf);
        vfree(readbuf);
+out_put_mtd:
        put_mtd_device(mtd);
        if (err)
                printk(PRINT_PREF "error %d occurred\n", err);
index 1a05bfac4eeee5f6f9406b469ab27a74760c3cc8..9667bf53528223a6cec4ab1e04a7358cb323991f 100644 (file)
@@ -80,7 +80,7 @@ static int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -115,12 +115,12 @@ static int erase_whole_device(void)
 
 static int write_eraseblock(int ebnum)
 {
-       size_t written = 0;
+       size_t written;
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
        set_random_data(writebuf, subpgsize);
-       err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+       err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
        if (unlikely(err || written != subpgsize)) {
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr);
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
        addr += subpgsize;
 
        set_random_data(writebuf, subpgsize);
-       err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+       err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
        if (unlikely(err || written != subpgsize)) {
                printk(PRINT_PREF "error: write failed at %#llx\n",
                       (long long)addr);
@@ -150,7 +150,7 @@ static int write_eraseblock(int ebnum)
 
 static int write_eraseblock2(int ebnum)
 {
-       size_t written = 0;
+       size_t written;
        int err = 0, k;
        loff_t addr = ebnum * mtd->erasesize;
 
@@ -158,7 +158,7 @@ static int write_eraseblock2(int ebnum)
                if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
                        break;
                set_random_data(writebuf, subpgsize * k);
-               err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+               err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
                if (unlikely(err || written != subpgsize * k)) {
                        printk(PRINT_PREF "error: write failed at %#llx\n",
                               (long long)addr);
@@ -189,14 +189,13 @@ static void print_subpage(unsigned char *p)
 
 static int verify_eraseblock(int ebnum)
 {
-       size_t read = 0;
+       size_t read;
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
        set_random_data(writebuf, subpgsize);
        clear_data(readbuf, subpgsize);
-       read = 0;
-       err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+       err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
        if (unlikely(err || read != subpgsize)) {
                if (mtd_is_bitflip(err) && read == subpgsize) {
                        printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -223,8 +222,7 @@ static int verify_eraseblock(int ebnum)
 
        set_random_data(writebuf, subpgsize);
        clear_data(readbuf, subpgsize);
-       read = 0;
-       err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+       err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
        if (unlikely(err || read != subpgsize)) {
                if (mtd_is_bitflip(err) && read == subpgsize) {
                        printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -252,7 +250,7 @@ static int verify_eraseblock(int ebnum)
 
 static int verify_eraseblock2(int ebnum)
 {
-       size_t read = 0;
+       size_t read;
        int err = 0, k;
        loff_t addr = ebnum * mtd->erasesize;
 
@@ -261,8 +259,7 @@ static int verify_eraseblock2(int ebnum)
                        break;
                set_random_data(writebuf, subpgsize * k);
                clear_data(readbuf, subpgsize * k);
-               read = 0;
-               err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+               err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
                if (unlikely(err || read != subpgsize * k)) {
                        if (mtd_is_bitflip(err) && read == subpgsize * k) {
                                printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -288,15 +285,14 @@ static int verify_eraseblock2(int ebnum)
 static int verify_eraseblock_ff(int ebnum)
 {
        uint32_t j;
-       size_t read = 0;
+       size_t read;
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
        memset(writebuf, 0xff, subpgsize);
        for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
                clear_data(readbuf, subpgsize);
-               read = 0;
-               err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+               err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
                if (unlikely(err || read != subpgsize)) {
                        if (mtd_is_bitflip(err) && read == subpgsize) {
                                printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -344,7 +340,7 @@ static int is_block_bad(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        int ret;
 
-       ret = mtd->block_isbad(mtd, addr);
+       ret = mtd_block_isbad(mtd, addr);
        if (ret)
                printk(PRINT_PREF "block %d is bad\n", ebnum);
        return ret;
index 03ab649a6964292f50f40f9073c45ae097a47b8e..b65861bc7b8e59397df4b8469774dcb4881705cf 100644 (file)
@@ -105,7 +105,7 @@ static inline int erase_eraseblock(int ebnum)
        ei.addr = addr;
        ei.len  = mtd->erasesize;
 
-       err = mtd->erase(mtd, &ei);
+       err = mtd_erase(mtd, &ei);
        if (err) {
                printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
                return err;
@@ -127,7 +127,7 @@ static inline int erase_eraseblock(int ebnum)
 static inline int check_eraseblock(int ebnum, unsigned char *buf)
 {
        int err, retries = 0;
-       size_t read = 0;
+       size_t read;
        loff_t addr = ebnum * mtd->erasesize;
        size_t len = mtd->erasesize;
 
@@ -137,7 +137,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
        }
 
 retry:
-       err = mtd->read(mtd, addr, len, &read, check_buf);
+       err = mtd_read(mtd, addr, len, &read, check_buf);
        if (mtd_is_bitflip(err))
                printk(PRINT_PREF "single bit flip occurred at EB %d "
                       "MTD reported that it was fixed.\n", ebnum);
@@ -181,7 +181,7 @@ retry:
 static inline int write_pattern(int ebnum, void *buf)
 {
        int err;
-       size_t written = 0;
+       size_t written;
        loff_t addr = ebnum * mtd->erasesize;
        size_t len = mtd->erasesize;
 
@@ -189,7 +189,7 @@ static inline int write_pattern(int ebnum, void *buf)
                addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
                len = pgcnt * pgsize;
        }
-       err = mtd->write(mtd, addr, len, &written, buf);
+       err = mtd_write(mtd, addr, len, &written, buf);
        if (err) {
                printk(PRINT_PREF "error %d while writing EB %d, written %zd"
                      " bytes\n", err, ebnum, written);
@@ -290,10 +290,9 @@ static int __init tort_init(void)
         * Check if there is a bad eraseblock among those we are going to test.
         */
        memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
-       if (mtd->block_isbad) {
+       if (mtd_can_have_bb(mtd)) {
                for (i = eb; i < eb + ebcnt; i++) {
-                       err = mtd->block_isbad(mtd,
-                                              (loff_t)i * mtd->erasesize);
+                       err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
 
                        if (err < 0) {
                                printk(PRINT_PREF "block_isbad() returned %d "
index 6c3fb5ab20f5723f3e97ce203bf28d274ee85f38..115749f20f9e5402d62785950092b05a4ec1206e 100644 (file)
@@ -664,7 +664,7 @@ static int io_init(struct ubi_device *ubi)
        ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
        ubi->flash_size = ubi->mtd->size;
 
-       if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+       if (mtd_can_have_bb(ubi->mtd))
                ubi->bad_allowed = 1;
 
        if (ubi->mtd->type == MTD_NORFLASH) {
index ab80c0debac8f4d20c4dc8500cba7569a04d0c7b..e2cdebf40840e4f3630e6a7bf9868ab46781ab74 100644 (file)
@@ -216,7 +216,7 @@ void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
        buf = vmalloc(len);
        if (!buf)
                return;
-       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+       err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err && err != -EUCLEAN) {
                ubi_err("error %d while reading %d bytes from PEB %d:%d, "
                        "read %zd bytes", err, len, pnum, offset, read);
index fb7f19b62d91fabe2fdc785bf08aa1a32f523497..cd26da8ad225b882d918188e46c6c2d38cccc803 100644 (file)
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
         * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
         * LEB is already locked, we just do not move it and return
-        * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+        * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+        * we do not know the reasons of the contention - it may be just a
+        * normal I/O on this LEB, so we want to re-try.
         */
        err = leb_write_trylock(ubi, vol_id, lnum);
        if (err) {
                dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
-               return MOVE_CANCEL_RACE;
+               return MOVE_RETRY;
        }
 
        /*
index f20b6f22f240378ce6386ee8c4e59d25f81cb3de..5cde4e5ca3e542a38945be3d12f106b4c3853e80 100644 (file)
@@ -170,7 +170,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
 
        addr = (loff_t)pnum * ubi->peb_size + offset;
 retry:
-       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+       err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err) {
                const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
 
@@ -289,7 +289,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
        }
 
        addr = (loff_t)pnum * ubi->peb_size + offset;
-       err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+       err = mtd_write(ubi->mtd, addr, len, &written, buf);
        if (err) {
                ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
                        "%zd bytes", err, len, pnum, offset, written);
@@ -361,7 +361,7 @@ retry:
        ei.callback = erase_callback;
        ei.priv     = (unsigned long)&wq;
 
-       err = ubi->mtd->erase(ubi->mtd, &ei);
+       err = mtd_erase(ubi->mtd, &ei);
        if (err) {
                if (retries++ < UBI_IO_RETRIES) {
                        dbg_io("error %d while erasing PEB %d, retry",
@@ -525,11 +525,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
         * the header comment in scan.c for more information).
         */
        addr = (loff_t)pnum * ubi->peb_size;
-       err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+       err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
        if (!err) {
                addr += ubi->vid_hdr_aloffset;
-               err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
-                                     (void *)&data);
+               err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
                if (!err)
                        return 0;
        }
@@ -635,7 +634,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
        if (ubi->bad_allowed) {
                int ret;
 
-               ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+               ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
                if (ret < 0)
                        ubi_err("error %d while checking if PEB %d is bad",
                                ret, pnum);
@@ -670,7 +669,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
        if (!ubi->bad_allowed)
                return 0;
 
-       err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+       err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
        if (err)
                ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
        return err;
@@ -1357,7 +1356,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
                return 0;
        }
 
-       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+       err = mtd_read(ubi->mtd, addr, len, &read, buf1);
        if (err && !mtd_is_bitflip(err))
                goto out_free;
 
@@ -1421,7 +1420,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
                return 0;
        }
 
-       err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+       err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err && !mtd_is_bitflip(err)) {
                ubi_err("error %d while reading %d bytes from PEB %d:%d, "
                        "read %zd bytes", err, len, pnum, offset, read);
index 1a35fc5e3b40a947451acc340b58b7c29f2ded41..9fdb35367fe0478c1625b1c94c63d0349d7a025d 100644 (file)
@@ -714,9 +714,7 @@ int ubi_sync(int ubi_num)
        if (!ubi)
                return -ENODEV;
 
-       if (ubi->mtd->sync)
-               ubi->mtd->sync(ubi->mtd);
-
+       mtd_sync(ubi->mtd);
        ubi_put_device(ubi);
        return 0;
 }
index dc64c767fd2175e2503510bb1cea51d3ffb63f47..d51d75d344462c1ec3b0ae657b423594f5857517 100644 (file)
@@ -120,6 +120,7 @@ enum {
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
  *                       target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
  */
 enum {
        MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
        MOVE_TARGET_RD_ERR,
        MOVE_TARGET_WR_ERR,
        MOVE_CANCEL_BITFLIPS,
+       MOVE_RETRY,
 };
 
 /**
index 42c684cf368815814614be19b0398d65fc99fcf0..0696e36b053939e878d0215aefe4f4598ba61ac3 100644 (file)
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                        protect = 1;
                        goto out_not_moved;
                }
-
+               if (err == MOVE_RETRY) {
+                       scrubbing = 1;
+                       goto out_not_moved;
+               }
                if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
        ubi_err("failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);
-       kmem_cache_free(ubi_wl_entry_slab, e);
 
        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        goto out_ro;
                }
                return err;
-       } else if (err != -EIO) {
+       }
+
+       kmem_cache_free(ubi_wl_entry_slab, e);
+       if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, lets switch to R/O mode.
                 */
                goto out_ro;
-       }
 
        /* It is %-EIO, the PEB went bad */
 
index 639cf8ab62ba09b26af2d1796da1463bd4763cfc..59effac15f36ad0d87b92858223436b2152225c5 100644 (file)
@@ -1634,7 +1634,7 @@ static int __init netback_init(void)
        int rc = 0;
        int group;
 
-       if (!xen_pv_domain())
+       if (!xen_domain())
                return -ENODEV;
 
        xen_netbk_group_nr = num_online_cpus();
index 1ce729d6af75b34f1a0177e5ee6af2ee4b29dfb3..410018c4c52818f0b3a697d64e4300a505825f91 100644 (file)
@@ -474,17 +474,14 @@ static const struct xenbus_device_id netback_ids[] = {
 };
 
 
-static struct xenbus_driver netback = {
-       .name = "vif",
-       .owner = THIS_MODULE,
-       .ids = netback_ids,
+static DEFINE_XENBUS_DRIVER(netback, ,
        .probe = netback_probe,
        .remove = netback_remove,
        .uevent = netback_uevent,
        .otherend_changed = frontend_changed,
-};
+);
 
 int xenvif_xenbus_init(void)
 {
-       return xenbus_register_backend(&netback);
+       return xenbus_register_backend(&netback_driver);
 }
index 0a59c57864f510ba5bcaf54df933dfbaa76f49e2..fa679057630f9152f418e9e2f9b60f549dded7e3 100644 (file)
@@ -1914,7 +1914,7 @@ static void xennet_sysfs_delif(struct net_device *netdev)
 
 #endif /* CONFIG_SYSFS */
 
-static struct xenbus_device_id netfront_ids[] = {
+static const struct xenbus_device_id netfront_ids[] = {
        { "vif" },
        { "" }
 };
@@ -1941,15 +1941,12 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
        return 0;
 }
 
-static struct xenbus_driver netfront_driver = {
-       .name = "vif",
-       .owner = THIS_MODULE,
-       .ids = netfront_ids,
+static DEFINE_XENBUS_DRIVER(netfront, ,
        .probe = netfront_probe,
        .remove = __devexit_p(xennet_remove),
        .resume = netfront_resume,
        .otherend_changed = netback_changed,
-};
+);
 
 static int __init netif_init(void)
 {
index 91a375fb6ae622181231fce19b1a5907e94c9f40..ea2bd1be26404092585dcb4d20b0ab113a808ab7 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/machdep.h>
 #endif /* CONFIG_PPC */
 
+#include <asm/setup.h>
 #include <asm/page.h>
 
 char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
index b0dd08e6a9da1cc4f8ee78ebf53f1e4e5f1de2b2..9dd90b30f91a23703c420f5b60cf1fc2d31f55cd 100644 (file)
@@ -175,21 +175,22 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
        u32 max_requests;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
-       pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF,  &status);
-       if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED))
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+       if ((control & PCI_PRI_CTRL_ENABLE) ||
+           !(status & PCI_PRI_STATUS_STOPPED))
                return -EBUSY;
 
-       pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests);
+       pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
        reqs = min(max_requests, reqs);
-       pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs);
+       pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
 
-       control |= PCI_PRI_ENABLE;
-       pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+       control |= PCI_PRI_CTRL_ENABLE;
+       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
        return 0;
 }
@@ -206,13 +207,13 @@ void pci_disable_pri(struct pci_dev *pdev)
        u16 control;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
-       control &= ~PCI_PRI_ENABLE;
-       pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       control &= ~PCI_PRI_CTRL_ENABLE;
+       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 }
 EXPORT_SYMBOL_GPL(pci_disable_pri);
 
@@ -227,13 +228,13 @@ bool pci_pri_enabled(struct pci_dev *pdev)
        u16 control;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return false;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
 
-       return (control & PCI_PRI_ENABLE) ? true : false;
+       return (control & PCI_PRI_CTRL_ENABLE) ? true : false;
 }
 EXPORT_SYMBOL_GPL(pci_pri_enabled);
 
@@ -249,17 +250,17 @@ int pci_reset_pri(struct pci_dev *pdev)
        u16 control;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
-       if (control & PCI_PRI_ENABLE)
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       if (control & PCI_PRI_CTRL_ENABLE)
                return -EBUSY;
 
-       control |= PCI_PRI_RESET;
+       control |= PCI_PRI_CTRL_RESET;
 
-       pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
        return 0;
 }
@@ -282,14 +283,14 @@ bool pci_pri_stopped(struct pci_dev *pdev)
        u16 control, status;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return true;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
-       pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF,  &status);
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
 
-       if (control & PCI_PRI_ENABLE)
+       if (control & PCI_PRI_CTRL_ENABLE)
                return false;
 
        return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
@@ -311,15 +312,15 @@ int pci_pri_status(struct pci_dev *pdev)
        u16 status, control;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
-       pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF,  &status);
+       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
 
        /* Stopped bit is undefined when enable == 1, so clear it */
-       if (control & PCI_PRI_ENABLE)
+       if (control & PCI_PRI_CTRL_ENABLE)
                status &= ~PCI_PRI_STATUS_STOPPED;
 
        return status;
@@ -342,25 +343,25 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
        u16 control, supported;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control);
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF,     &supported);
+       pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
 
-       if (!(supported & PCI_PASID_ENABLE))
+       if (control & PCI_PASID_CTRL_ENABLE)
                return -EINVAL;
 
-       supported &= PCI_PASID_EXEC | PCI_PASID_PRIV;
+       supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
        /* User wants to enable anything unsupported? */
        if ((supported & features) != features)
                return -EINVAL;
 
-       control = PCI_PASID_ENABLE | features;
+       control = PCI_PASID_CTRL_ENABLE | features;
 
-       pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+       pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
 
        return 0;
 }
@@ -376,11 +377,11 @@ void pci_disable_pasid(struct pci_dev *pdev)
        u16 control = 0;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
        if (!pos)
                return;
 
-       pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+       pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
 }
 EXPORT_SYMBOL_GPL(pci_disable_pasid);
 
@@ -391,22 +392,21 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
  * Returns a negative value when no PASI capability is present.
  * Otherwise is returns a bitmask with supported features. Current
  * features reported are:
- * PCI_PASID_ENABLE - PASID capability can be enabled
- * PCI_PASID_EXEC - Execute permission supported
- * PCI_PASID_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Priviledged mode supported
  */
 int pci_pasid_features(struct pci_dev *pdev)
 {
        u16 supported;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
 
-       supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV;
+       supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
        return supported;
 }
@@ -426,11 +426,11 @@ int pci_max_pasids(struct pci_dev *pdev)
        u16 supported;
        int pos;
 
-       pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
        if (!pos)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
 
        supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
 
index 838f571027b750e390090fb06dc032abda3db639..9a33fdde2d1639ae5722eef34f37771d01bd6594 100644 (file)
@@ -45,7 +45,6 @@ extern int pciehp_poll_time;
 extern int pciehp_debug;
 extern int pciehp_force;
 extern struct workqueue_struct *pciehp_wq;
-extern struct workqueue_struct *pciehp_ordered_wq;
 
 #define dbg(format, arg...)                                            \
 do {                                                                   \
index 7ac8358df8fdd1a6591f259bb0216f37ca44cf65..b8c99d35ac97af9aef83c5e46b9e9337bf7e9610 100644 (file)
@@ -43,7 +43,6 @@ int pciehp_poll_mode;
 int pciehp_poll_time;
 int pciehp_force;
 struct workqueue_struct *pciehp_wq;
-struct workqueue_struct *pciehp_ordered_wq;
 
 #define DRIVER_VERSION "0.4"
 #define DRIVER_AUTHOR  "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -345,18 +344,11 @@ static int __init pcied_init(void)
        if (!pciehp_wq)
                return -ENOMEM;
 
-       pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
-       if (!pciehp_ordered_wq) {
-               destroy_workqueue(pciehp_wq);
-               return -ENOMEM;
-       }
-
        pciehp_firmware_init();
        retval = pcie_port_service_register(&hpdriver_portdrv);
        dbg("pcie_port_service_register = %d\n", retval);
        info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
        if (retval) {
-               destroy_workqueue(pciehp_ordered_wq);
                destroy_workqueue(pciehp_wq);
                dbg("Failure to register service\n");
        }
@@ -366,9 +358,8 @@ static int __init pcied_init(void)
 static void __exit pcied_cleanup(void)
 {
        dbg("unload_pciehpd()\n");
-       destroy_workqueue(pciehp_ordered_wq);
-       destroy_workqueue(pciehp_wq);
        pcie_port_service_unregister(&hpdriver_portdrv);
+       destroy_workqueue(pciehp_wq);
        info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
 }
 
index 085dbb5fc168be33de77d0ced5c411c807ffc2ef..27f44295a65738976b4be0f07f4cd33b04e2cc85 100644 (file)
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
                kfree(info);
                goto out;
        }
-       queue_work(pciehp_ordered_wq, &info->work);
+       queue_work(pciehp_wq, &info->work);
  out:
        mutex_unlock(&p_slot->lock);
 }
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
        else
                p_slot->state = POWERON_STATE;
 
-       queue_work(pciehp_ordered_wq, &info->work);
+       queue_work(pciehp_wq, &info->work);
 }
 
 static void interrupt_event_handler(struct work_struct *work)
index 7b1414810ae3e0e0a5ccbcd9bec0f21051cf95db..bcdbb16436216886949ed44352d1c0ea4606f21a 100644 (file)
@@ -806,7 +806,6 @@ static void pcie_cleanup_slot(struct controller *ctrl)
        struct slot *slot = ctrl->slot;
        cancel_delayed_work(&slot->work);
        flush_workqueue(pciehp_wq);
-       flush_workqueue(pciehp_ordered_wq);
        kfree(slot);
 }
 
index 0e6d04d7ba4f7c0a2f8494f3e99aceaed1eecacb..337e16ab4a92ba3cde34b53b220b61d9191b4658 100644 (file)
@@ -323,6 +323,8 @@ static void free_msi_irqs(struct pci_dev *dev)
                        if (list_is_last(&entry->list, &dev->msi_list))
                                iounmap(entry->mask_base);
                }
+               kobject_del(&entry->kobj);
+               kobject_put(&entry->kobj);
                list_del(&entry->list);
                kfree(entry);
        }
@@ -403,6 +405,98 @@ void pci_restore_msi_state(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 
+
+#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
+#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
+
+struct msi_attribute {
+       struct attribute        attr;
+       ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
+                        const char *buf, size_t count);
+};
+
+static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
+                            char *buf)
+{
+       return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
+}
+
+static ssize_t msi_irq_attr_show(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       struct msi_attribute *attribute = to_msi_attr(attr);
+       struct msi_desc *entry = to_msi_desc(kobj);
+
+       if (!attribute->show)
+               return -EIO;
+
+       return attribute->show(entry, attribute, buf);
+}
+
+static const struct sysfs_ops msi_irq_sysfs_ops = {
+       .show = msi_irq_attr_show,
+};
+
+static struct msi_attribute mode_attribute =
+       __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
+
+
+struct attribute *msi_irq_default_attrs[] = {
+       &mode_attribute.attr,
+       NULL
+};
+
+void msi_kobj_release(struct kobject *kobj)
+{
+       struct msi_desc *entry = to_msi_desc(kobj);
+
+       pci_dev_put(entry->dev);
+}
+
+static struct kobj_type msi_irq_ktype = {
+       .release = msi_kobj_release,
+       .sysfs_ops = &msi_irq_sysfs_ops,
+       .default_attrs = msi_irq_default_attrs,
+};
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+       struct msi_desc *entry;
+       struct kobject *kobj;
+       int ret;
+       int count = 0;
+
+       pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
+       if (!pdev->msi_kset)
+               return -ENOMEM;
+
+       list_for_each_entry(entry, &pdev->msi_list, list) {
+               kobj = &entry->kobj;
+               kobj->kset = pdev->msi_kset;
+               pci_dev_get(pdev);
+               ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
+                                    "%u", entry->irq);
+               if (ret)
+                       goto out_unroll;
+
+               count++;
+       }
+
+       return 0;
+
+out_unroll:
+       list_for_each_entry(entry, &pdev->msi_list, list) {
+               if (!count)
+                       break;
+               kobject_del(&entry->kobj);
+               kobject_put(&entry->kobj);
+               count--;
+       }
+       return ret;
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -454,6 +548,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
                return ret;
        }
 
+       ret = populate_msi_sysfs(dev);
+       if (ret) {
+               msi_mask_irq(entry, mask, ~mask);
+               free_msi_irqs(dev);
+               return ret;
+       }
+
        /* Set MSI enabled bits  */
        pci_intx_for_msi(dev, 0);
        msi_set_enable(dev, pos, 1);
@@ -574,6 +675,12 @@ static int msix_capability_init(struct pci_dev *dev,
 
        msix_program_entries(dev, entries);
 
+       ret = populate_msi_sysfs(dev);
+       if (ret) {
+               ret = 0;
+               goto error;
+       }
+
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
@@ -732,6 +839,8 @@ void pci_disable_msi(struct pci_dev *dev)
 
        pci_msi_shutdown(dev);
        free_msi_irqs(dev);
+       kset_unregister(dev->msi_kset);
+       dev->msi_kset = NULL;
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
@@ -830,6 +939,8 @@ void pci_disable_msix(struct pci_dev *dev)
 
        pci_msix_shutdown(dev);
        free_msi_irqs(dev);
+       kset_unregister(dev->msi_kset);
+       dev->msi_kset = NULL;
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
@@ -870,5 +981,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
+       int pos;
        INIT_LIST_HEAD(&dev->msi_list);
+
+       /* Disable the msi hardware to avoid screaming interrupts
+        * during boot.  This is the power on reset default so
+        * usually this should be a noop.
+        */
+       pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       if (pos)
+               msi_set_enable(dev, pos, 0);
+       msix_set_enable(dev, 0);
 }
index 4ecb6408b0d61e2128c26100bba3d6690402de5d..060fd22a1103856988d6866b64c4d8e8e3e95a89 100644 (file)
@@ -45,16 +45,20 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
 {
        struct pci_dev *pci_dev = context;
 
-       if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+       if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
+               return;
+
+       if (!pci_dev->pm_cap || !pci_dev->pme_support
+            || pci_check_pme_status(pci_dev)) {
                if (pci_dev->pme_poll)
                        pci_dev->pme_poll = false;
 
                pci_wakeup_event(pci_dev);
-               pci_check_pme_status(pci_dev);
                pm_runtime_resume(&pci_dev->dev);
-               if (pci_dev->subordinate)
-                       pci_pme_wakeup_bus(pci_dev->subordinate);
        }
+
+       if (pci_dev->subordinate)
+               pci_pme_wakeup_bus(pci_dev->subordinate);
 }
 
 /**
@@ -395,7 +399,6 @@ static int __init acpi_pci_init(void)
 
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
-               pcie_clear_aspm();
                pcie_no_aspm();
        }
 
index cbfbab18be91d7ecdf9ebd3952e9b77aac590726..1cfbf228fbb1d7f6531ae5196ca7aa12d637df4d 100644 (file)
@@ -68,7 +68,7 @@ struct pcie_link_state {
        struct aspm_latency acceptable[8];
 };
 
-static int aspm_disabled, aspm_force, aspm_clear_state;
+static int aspm_disabled, aspm_force;
 static bool aspm_support_enabled = true;
 static DEFINE_MUTEX(aspm_lock);
 static LIST_HEAD(link_list);
@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
        int pos;
        u32 reg32;
 
-       if (aspm_clear_state)
-               return -EINVAL;
-
        /*
         * Some functions in a slot might not all be PCIe functions,
         * very strange. Disable ASPM for the whole slot
@@ -574,9 +571,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
            pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
                return;
 
-       if (aspm_disabled && !aspm_clear_state)
-               return;
-
        /* VIA has a strange chipset, root port is under a bridge */
        if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
            pdev->bus->self)
@@ -608,7 +602,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
         * the BIOS's expectation, we'll do so once pci_enable_device() is
         * called.
         */
-       if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
+       if (aspm_policy != POLICY_POWERSAVE) {
                pcie_config_aspm_path(link);
                pcie_set_clkpm(link, policy_to_clkpm_state(link));
        }
@@ -649,8 +643,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
        struct pci_dev *parent = pdev->bus->self;
        struct pcie_link_state *link, *root, *parent_link;
 
-       if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
-           !parent || !parent->link_state)
+       if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
                return;
        if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
            (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -734,13 +727,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
  * pci_disable_link_state - disable pci device's link state, so the link will
  * never enter specific states
  */
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+                                    bool force)
 {
        struct pci_dev *parent = pdev->bus->self;
        struct pcie_link_state *link;
 
-       if (aspm_disabled || !pci_is_pcie(pdev))
+       if (aspm_disabled && !force)
+               return;
+
+       if (!pci_is_pcie(pdev))
                return;
+
        if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
            pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
                parent = pdev;
@@ -768,16 +766,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
 
 void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
 {
-       __pci_disable_link_state(pdev, state, false);
+       __pci_disable_link_state(pdev, state, false, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state_locked);
 
 void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
-       __pci_disable_link_state(pdev, state, true);
+       __pci_disable_link_state(pdev, state, true, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state);
 
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+       struct pci_dev *child;
+
+       /*
+        * Clear any ASPM setup that the firmware has carried out on this bus
+        */
+       list_for_each_entry(child, &bus->devices, bus_list) {
+               __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+                                        PCIE_LINK_STATE_L1 |
+                                        PCIE_LINK_STATE_CLKPM,
+                                        false, true);
+       }
+}
+
 static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
 {
        int i;
@@ -935,6 +948,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
 static int __init pcie_aspm_disable(char *str)
 {
        if (!strcmp(str, "off")) {
+               aspm_policy = POLICY_DEFAULT;
                aspm_disabled = 1;
                aspm_support_enabled = false;
                printk(KERN_INFO "PCIe ASPM is disabled\n");
@@ -947,16 +961,18 @@ static int __init pcie_aspm_disable(char *str)
 
 __setup("pcie_aspm=", pcie_aspm_disable);
 
-void pcie_clear_aspm(void)
-{
-       if (!aspm_force)
-               aspm_clear_state = 1;
-}
-
 void pcie_no_aspm(void)
 {
-       if (!aspm_force)
+       /*
+        * Disabling ASPM is intended to prevent the kernel from modifying
+        * existing hardware state, not to clear existing state. To that end:
+        * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+        * (b) prevent userspace from changing policy
+        */
+       if (!aspm_force) {
+               aspm_policy = POLICY_DEFAULT;
                aspm_disabled = 1;
+       }
 }
 
 /**
index 90832a955991f1d875eaf57a01a3b5c90930cb72..7cf3d2fcf56a9369209395304494d4b7699d5f6d 100644 (file)
@@ -1126,14 +1126,11 @@ static const struct xenbus_device_id xenpci_ids[] = {
        {""},
 };
 
-static struct xenbus_driver xenbus_pcifront_driver = {
-       .name                   = "pcifront",
-       .owner                  = THIS_MODULE,
-       .ids                    = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
        .probe                  = pcifront_xenbus_probe,
        .remove                 = pcifront_xenbus_remove,
        .otherend_changed       = pcifront_backend_changed,
-};
+);
 
 static int __init pcifront_init(void)
 {
@@ -1142,12 +1139,12 @@ static int __init pcifront_init(void)
 
        pci_frontend_registrar(1 /* enable */);
 
-       return xenbus_register_frontend(&xenbus_pcifront_driver);
+       return xenbus_register_frontend(&xenpci_driver);
 }
 
 static void __exit pcifront_cleanup(void)
 {
-       xenbus_unregister_driver(&xenbus_pcifront_driver);
+       xenbus_unregister_driver(&xenpci_driver);
        pci_frontend_registrar(0 /* disable */);
 }
 module_init(pcifront_init);
index e17e2f8001d2a6ebb770d4e20c6aa0a3cd2576cd..afaf885581251a9adb23c2111e1d370766b3cafa 100644 (file)
@@ -12,7 +12,10 @@ menu "Pin controllers"
        depends on PINCTRL
 
 config PINMUX
-       bool "Support pinmux controllers"
+       bool "Support pin multiplexing controllers"
+
+config PINCONF
+       bool "Support pin configuration controllers"
 
 config DEBUG_PINCTRL
        bool "Debug PINCTRL calls"
@@ -20,16 +23,25 @@ config DEBUG_PINCTRL
        help
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
-config PINMUX_SIRF
-       bool "CSR SiRFprimaII pinmux driver"
+config PINCTRL_SIRF
+       bool "CSR SiRFprimaII pin controller driver"
        depends on ARCH_PRIMA2
        select PINMUX
 
-config PINMUX_U300
-       bool "U300 pinmux driver"
+config PINCTRL_U300
+       bool "U300 pin controller driver"
        depends on ARCH_U300
        select PINMUX
 
+config PINCTRL_COH901
+       bool "ST-Ericsson U300 COH 901 335/571 GPIO"
+       depends on GPIOLIB && ARCH_U300 && PINMUX_U300
+       help
+         Say yes here to support GPIO interface on ST-Ericsson U300.
+         The names of the two IP block variants supported are
+         COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
+         ports of 8 GPIO pins each.
+
 endmenu
 
 endif
index bdc548a6b7e9bdbab3497bc23299843e69bb937e..827601cc68f6c6c14fdaeb5aa195daf45a924a17 100644 (file)
@@ -1,8 +1,10 @@
 # generic pinmux support
 
-ccflags-$(CONFIG_DEBUG_PINMUX) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_PINCTRL)        += -DDEBUG
 
 obj-$(CONFIG_PINCTRL)          += core.o
 obj-$(CONFIG_PINMUX)           += pinmux.o
-obj-$(CONFIG_PINMUX_SIRF)      += pinmux-sirf.o
-obj-$(CONFIG_PINMUX_U300)      += pinmux-u300.o
+obj-$(CONFIG_PINCONF)          += pinconf.o
+obj-$(CONFIG_PINCTRL_SIRF)     += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_U300)     += pinctrl-u300.o
+obj-$(CONFIG_PINCTRL_COH901)   += pinctrl-coh901.o
index eadef9e191ea59d5f59b9453894a1d3d1824b4bf..569bdb3ef1046155b021648390087a9010591d3b 100644 (file)
 #include <linux/pinctrl/machine.h>
 #include "core.h"
 #include "pinmux.h"
+#include "pinconf.h"
 
 /* Global list of pin control devices */
 static DEFINE_MUTEX(pinctrldev_list_mutex);
 static LIST_HEAD(pinctrldev_list);
 
-static void pinctrl_dev_release(struct device *dev)
-{
-       struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
-       kfree(pctldev);
-}
-
 const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
 {
        /* We're not allowed to register devices without name */
@@ -70,14 +65,14 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
 
        mutex_lock(&pinctrldev_list_mutex);
        list_for_each_entry(pctldev, &pinctrldev_list, node) {
-               if (dev &&  &pctldev->dev == dev) {
+               if (dev && pctldev->dev == dev) {
                        /* Matched on device pointer */
                        found = true;
                        break;
                }
 
                if (devname &&
-                   !strcmp(dev_name(&pctldev->dev), devname)) {
+                   !strcmp(dev_name(pctldev->dev), devname)) {
                        /* Matched on device name */
                        found = true;
                        break;
@@ -88,7 +83,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
        return found ? pctldev : NULL;
 }
 
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin)
 {
        struct pin_desc *pindesc;
        unsigned long flags;
@@ -100,6 +95,31 @@ struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
        return pindesc;
 }
 
+/**
+ * pin_get_from_name() - look up a pin number from a name
+ * @pctldev: the pin control device to lookup the pin on
+ * @name: the name of the pin to look up
+ */
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
+{
+       unsigned i, pin;
+
+       /* The pin number can be retrived from the pin controller descriptor */
+       for (i = 0; i < pctldev->desc->npins; i++) {
+               struct pin_desc *desc;
+
+               pin = pctldev->desc->pins[i].number;
+               desc = pin_desc_get(pctldev, pin);
+               /* Pin space may be sparse */
+               if (desc == NULL)
+                       continue;
+               if (desc->name && !strcmp(name, desc->name))
+                       return pin;
+       }
+
+       return -EINVAL;
+}
+
 /**
  * pin_is_valid() - check if pin exists on controller
  * @pctldev: the pin control device to check the pin on
@@ -139,6 +159,8 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
                if (pindesc != NULL) {
                        radix_tree_delete(&pctldev->pin_desc_tree,
                                          pins[i].number);
+                       if (pindesc->dynamic_name)
+                               kfree(pindesc->name);
                }
                kfree(pindesc);
        }
@@ -160,19 +182,27 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
        pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
        if (pindesc == NULL)
                return -ENOMEM;
+
        spin_lock_init(&pindesc->lock);
 
        /* Set owner */
        pindesc->pctldev = pctldev;
 
        /* Copy basic pin info */
-       pindesc->name = name;
+       if (pindesc->name) {
+               pindesc->name = name;
+       } else {
+               pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
+               if (pindesc->name == NULL)
+                       return -ENOMEM;
+               pindesc->dynamic_name = true;
+       }
 
        spin_lock(&pctldev->pin_desc_tree_lock);
        radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
        spin_unlock(&pctldev->pin_desc_tree_lock);
        pr_debug("registered pin %d (%s) on %s\n",
-                number, name ? name : "(unnamed)", pctldev->desc->name);
+                number, pindesc->name, pctldev->desc->name);
        return 0;
 }
 
@@ -284,21 +314,52 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
        mutex_unlock(&pctldev->gpio_ranges_lock);
 }
 
+/**
+ * pinctrl_get_group_selector() - returns the group selector for a group
+ * @pctldev: the pin controller handling the group
+ * @pin_group: the pin group to look up
+ */
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+                              const char *pin_group)
+{
+       const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+       unsigned group_selector = 0;
+
+       while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+               const char *gname = pctlops->get_group_name(pctldev,
+                                                           group_selector);
+               if (!strcmp(gname, pin_group)) {
+                       dev_dbg(pctldev->dev,
+                               "found group selector %u for %s\n",
+                               group_selector,
+                               pin_group);
+                       return group_selector;
+               }
+
+               group_selector++;
+       }
+
+       dev_err(pctldev->dev, "does not have pin group %s\n",
+               pin_group);
+
+       return -EINVAL;
+}
+
 #ifdef CONFIG_DEBUG_FS
 
 static int pinctrl_pins_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
-       unsigned pin;
+       unsigned i, pin;
 
        seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
-       seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
 
-       /* The highest pin number need to be included in the loop, thus <= */
-       for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+       /* The pin number can be retrived from the pin controller descriptor */
+       for (i = 0; i < pctldev->desc->npins; i++) {
                struct pin_desc *desc;
 
+               pin = pctldev->desc->pins[i].number;
                desc = pin_desc_get(pctldev, pin);
                /* Pin space may be sparse */
                if (desc == NULL)
@@ -363,8 +424,11 @@ static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
        /* Loop over the ranges */
        mutex_lock(&pctldev->gpio_ranges_lock);
        list_for_each_entry(range, &pctldev->gpio_ranges, node) {
-               seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
-                          range->base, (range->base + range->npins - 1));
+               seq_printf(s, "%u: %s GPIOS [%u - %u] PINS [%u - %u]\n",
+                          range->id, range->name,
+                          range->base, (range->base + range->npins - 1),
+                          range->pin_base,
+                          (range->pin_base + range->npins - 1));
        }
        mutex_unlock(&pctldev->gpio_ranges_lock);
 
@@ -375,11 +439,15 @@ static int pinctrl_devices_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev;
 
-       seq_puts(s, "name [pinmux]\n");
+       seq_puts(s, "name [pinmux] [pinconf]\n");
        mutex_lock(&pinctrldev_list_mutex);
        list_for_each_entry(pctldev, &pinctrldev_list, node) {
                seq_printf(s, "%s ", pctldev->desc->name);
                if (pctldev->desc->pmxops)
+                       seq_puts(s, "yes ");
+               else
+                       seq_puts(s, "no ");
+               if (pctldev->desc->confops)
                        seq_puts(s, "yes");
                else
                        seq_puts(s, "no");
@@ -444,11 +512,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
 {
        static struct dentry *device_root;
 
-       device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+       device_root = debugfs_create_dir(dev_name(pctldev->dev),
                                         debugfs_root);
        if (IS_ERR(device_root) || !device_root) {
                pr_warn("failed to create debugfs directory for %s\n",
-                       dev_name(&pctldev->dev));
+                       dev_name(pctldev->dev));
                return;
        }
        debugfs_create_file("pins", S_IFREG | S_IRUGO,
@@ -458,6 +526,7 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
        debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
                            device_root, pctldev, &pinctrl_gpioranges_ops);
        pinmux_init_device_debugfs(device_root, pctldev);
+       pinconf_init_device_debugfs(device_root, pctldev);
 }
 
 static void pinctrl_init_debugfs(void)
@@ -495,7 +564,6 @@ static void pinctrl_init_debugfs(void)
 struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
                                    struct device *dev, void *driver_data)
 {
-       static atomic_t pinmux_no = ATOMIC_INIT(0);
        struct pinctrl_dev *pctldev;
        int ret;
 
@@ -514,6 +582,16 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
                }
        }
 
+       /* If we're implementing pinconfig, check the ops for sanity */
+       if (pctldesc->confops) {
+               ret = pinconf_check_ops(pctldesc->confops);
+               if (ret) {
+                       pr_err("%s pin config ops lacks necessary functions\n",
+                              pctldesc->name);
+                       return NULL;
+               }
+       }
+
        pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
        if (pctldev == NULL)
                return NULL;
@@ -526,18 +604,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
        spin_lock_init(&pctldev->pin_desc_tree_lock);
        INIT_LIST_HEAD(&pctldev->gpio_ranges);
        mutex_init(&pctldev->gpio_ranges_lock);
-
-       /* Register device */
-       pctldev->dev.parent = dev;
-       dev_set_name(&pctldev->dev, "pinctrl.%d",
-                    atomic_inc_return(&pinmux_no) - 1);
-       pctldev->dev.release = pinctrl_dev_release;
-       ret = device_register(&pctldev->dev);
-       if (ret != 0) {
-               pr_err("error in device registration\n");
-               goto out_reg_dev_err;
-       }
-       dev_set_drvdata(&pctldev->dev, pctldev);
+       pctldev->dev = dev;
 
        /* Register all the pins */
        pr_debug("try to register %d pins on %s...\n",
@@ -547,7 +614,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
                pr_err("error during pin registration\n");
                pinctrl_free_pindescs(pctldev, pctldesc->pins,
                                      pctldesc->npins);
-               goto out_reg_pins_err;
+               goto out_err;
        }
 
        pinctrl_init_device_debugfs(pctldev);
@@ -557,10 +624,8 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
        pinmux_hog_maps(pctldev);
        return pctldev;
 
-out_reg_pins_err:
-       device_del(&pctldev->dev);
-out_reg_dev_err:
-       put_device(&pctldev->dev);
+out_err:
+       kfree(pctldev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(pinctrl_register);
@@ -584,7 +649,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
        /* Destroy descriptor tree */
        pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
                              pctldev->desc->npins);
-       device_unregister(&pctldev->dev);
+       kfree(pctldev);
 }
 EXPORT_SYMBOL_GPL(pinctrl_unregister);
 
index 472fa1341cc0c3f9f7a59446d4a625c727ce4d7f..177a3310547f31ceb8a9cd1191cb55f7f253469d 100644 (file)
@@ -9,6 +9,10 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
+#include <linux/pinctrl/pinconf.h>
+
+struct pinctrl_gpio_range;
+
 /**
  * struct pinctrl_dev - pin control class device
  * @node: node to include this pin controller in the global pin controller list
@@ -34,7 +38,7 @@ struct pinctrl_dev {
        spinlock_t pin_desc_tree_lock;
        struct list_head gpio_ranges;
        struct mutex gpio_ranges_lock;
-       struct device dev;
+       struct device *dev;
        struct module *owner;
        void *driver_data;
 #ifdef CONFIG_PINMUX
@@ -48,6 +52,7 @@ struct pinctrl_dev {
  * @pctldev: corresponding pin control device
  * @name: a name for the pin, e.g. the name of the pin/pad/finger on a
  *     datasheet or such
+ * @dynamic_name: if the name of this pin was dynamically allocated
  * @lock: a lock to protect the descriptor structure
  * @mux_requested: whether the pin is already requested by pinmux or not
  * @mux_function: a named muxing function for the pin that will be passed to
@@ -56,6 +61,7 @@ struct pinctrl_dev {
 struct pin_desc {
        struct pinctrl_dev *pctldev;
        const char *name;
+       bool dynamic_name;
        spinlock_t lock;
        /* These fields only added when supporting pinmux drivers */
 #ifdef CONFIG_PINMUX
@@ -65,7 +71,10 @@ struct pin_desc {
 
 struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
                                             const char *dev_name);
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin);
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin);
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
 int pinctrl_get_device_gpio_range(unsigned gpio,
                                  struct pinctrl_dev **outdev,
                                  struct pinctrl_gpio_range **outrange);
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+                              const char *pin_group);
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
new file mode 100644 (file)
index 0000000..1259872
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * Core driver for the pin config portions of the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinconfig core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "core.h"
+#include "pinconf.h"
+
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+                          unsigned long *config)
+{
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+
+       if (!ops || !ops->pin_config_get) {
+               dev_err(pctldev->dev, "cannot get pin configuration, missing "
+                       "pin_config_get() function in driver\n");
+               return -EINVAL;
+       }
+
+       return ops->pin_config_get(pctldev, pin, config);
+}
+
+/**
+ * pin_config_get() - get the configuration of a single pin parameter
+ * @dev_name: name of the pin controller device for this pin
+ * @name: name of the pin to get the config for
+ * @config: the config pointed to by this argument will be filled in with the
+ *     current pin state, it can be used directly by drivers as a numeral, or
+ *     it can be dereferenced to any struct.
+ */
+int pin_config_get(const char *dev_name, const char *name,
+                         unsigned long *config)
+{
+       struct pinctrl_dev *pctldev;
+       int pin;
+
+       pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+       if (!pctldev)
+               return -EINVAL;
+
+       pin = pin_get_from_name(pctldev, name);
+       if (pin < 0)
+               return pin;
+
+       return pin_config_get_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_get);
+
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+                          unsigned long config)
+{
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+       int ret;
+
+       if (!ops || !ops->pin_config_set) {
+               dev_err(pctldev->dev, "cannot configure pin, missing "
+                       "config function in driver\n");
+               return -EINVAL;
+       }
+
+       ret = ops->pin_config_set(pctldev, pin, config);
+       if (ret) {
+               dev_err(pctldev->dev,
+                       "unable to set pin configuration on pin %d\n", pin);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * pin_config_set() - set the configuration of a single pin parameter
+ * @dev_name: name of pin controller device for this pin
+ * @name: name of the pin to set the config for
+ * @config: the config in this argument will contain the desired pin state, it
+ *     can be used directly by drivers as a numeral, or it can be dereferenced
+ *     to any struct.
+ */
+int pin_config_set(const char *dev_name, const char *name,
+                  unsigned long config)
+{
+       struct pinctrl_dev *pctldev;
+       int pin;
+
+       pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+       if (!pctldev)
+               return -EINVAL;
+
+       pin = pin_get_from_name(pctldev, name);
+       if (pin < 0)
+               return pin;
+
+       return pin_config_set_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_set);
+
+int pin_config_group_get(const char *dev_name, const char *pin_group,
+                        unsigned long *config)
+{
+       struct pinctrl_dev *pctldev;
+       const struct pinconf_ops *ops;
+       int selector;
+
+       pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+       if (!pctldev)
+               return -EINVAL;
+       ops = pctldev->desc->confops;
+
+       if (!ops || !ops->pin_config_group_get) {
+               dev_err(pctldev->dev, "cannot get configuration for pin "
+                       "group, missing group config get function in "
+                       "driver\n");
+               return -EINVAL;
+       }
+
+       selector = pinctrl_get_group_selector(pctldev, pin_group);
+       if (selector < 0)
+               return selector;
+
+       return ops->pin_config_group_get(pctldev, selector, config);
+}
+EXPORT_SYMBOL(pin_config_group_get);
+
+
+int pin_config_group_set(const char *dev_name, const char *pin_group,
+                        unsigned long config)
+{
+       struct pinctrl_dev *pctldev;
+       const struct pinconf_ops *ops;
+       const struct pinctrl_ops *pctlops;
+       int selector;
+       const unsigned *pins;
+       unsigned num_pins;
+       int ret;
+       int i;
+
+       pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+       if (!pctldev)
+               return -EINVAL;
+       ops = pctldev->desc->confops;
+       pctlops = pctldev->desc->pctlops;
+
+       if (!ops || (!ops->pin_config_group_set && !ops->pin_config_set)) {
+               dev_err(pctldev->dev, "cannot configure pin group, missing "
+                       "config function in driver\n");
+               return -EINVAL;
+       }
+
+       selector = pinctrl_get_group_selector(pctldev, pin_group);
+       if (selector < 0)
+               return selector;
+
+       ret = pctlops->get_group_pins(pctldev, selector, &pins, &num_pins);
+       if (ret) {
+               dev_err(pctldev->dev, "cannot configure pin group, error "
+                       "getting pins\n");
+               return ret;
+       }
+
+       /*
+        * If the pin controller supports handling entire groups we use that
+        * capability.
+        */
+       if (ops->pin_config_group_set) {
+               ret = ops->pin_config_group_set(pctldev, selector, config);
+               /*
+                * If the pin controller prefer that a certain group be handled
+                * pin-by-pin as well, it returns -EAGAIN.
+                */
+               if (ret != -EAGAIN)
+                       return ret;
+       }
+
+       /*
+        * If the controller cannot handle entire groups, we configure each pin
+        * individually.
+        */
+       if (!ops->pin_config_set)
+               return 0;
+
+       for (i = 0; i < num_pins; i++) {
+               ret = ops->pin_config_set(pctldev, pins[i], config);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(pin_config_group_set);
+
+int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+       /* We must be able to read out pin status */
+       if (!ops->pin_config_get && !ops->pin_config_group_get)
+               return -EINVAL;
+       /* We have to be able to config the pins in SOME way */
+       if (!ops->pin_config_set && !ops->pin_config_group_set)
+               return -EINVAL;
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
+                            struct seq_file *s, int pin)
+{
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+
+       if (ops && ops->pin_config_dbg_show)
+               ops->pin_config_dbg_show(pctldev, s, pin);
+}
+
+static int pinconf_pins_show(struct seq_file *s, void *what)
+{
+       struct pinctrl_dev *pctldev = s->private;
+       unsigned i, pin;
+
+       seq_puts(s, "Pin config settings per pin\n");
+       seq_puts(s, "Format: pin (name): pinmux setting array\n");
+
+       /* The pin number can be retrived from the pin controller descriptor */
+       for (i = 0; i < pctldev->desc->npins; i++) {
+               struct pin_desc *desc;
+
+               pin = pctldev->desc->pins[i].number;
+               desc = pin_desc_get(pctldev, pin);
+               /* Skip if we cannot search the pin */
+               if (desc == NULL)
+                       continue;
+
+               seq_printf(s, "pin %d (%s):", pin,
+                          desc->name ? desc->name : "unnamed");
+
+               pinconf_dump_pin(pctldev, s, pin);
+
+               seq_printf(s, "\n");
+       }
+
+       return 0;
+}
+
+static void pinconf_dump_group(struct pinctrl_dev *pctldev,
+                              struct seq_file *s, unsigned selector,
+                              const char *gname)
+{
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+
+       if (ops && ops->pin_config_group_dbg_show)
+               ops->pin_config_group_dbg_show(pctldev, s, selector);
+}
+
+static int pinconf_groups_show(struct seq_file *s, void *what)
+{
+       struct pinctrl_dev *pctldev = s->private;
+       const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+       unsigned selector = 0;
+
+       if (!ops || !ops->pin_config_group_get)
+               return 0;
+
+       seq_puts(s, "Pin config settings per pin group\n");
+       seq_puts(s, "Format: group (name): pinmux setting array\n");
+
+       while (pctlops->list_groups(pctldev, selector) >= 0) {
+               const char *gname = pctlops->get_group_name(pctldev, selector);
+
+               seq_printf(s, "%u (%s):", selector, gname);
+               pinconf_dump_group(pctldev, s, selector, gname);
+               selector++;
+       }
+
+       return 0;
+}
+
+static int pinconf_pins_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pinconf_pins_show, inode->i_private);
+}
+
+static int pinconf_groups_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pinconf_groups_show, inode->i_private);
+}
+
+static const struct file_operations pinconf_pins_ops = {
+       .open           = pinconf_pins_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static const struct file_operations pinconf_groups_ops = {
+       .open           = pinconf_groups_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+void pinconf_init_device_debugfs(struct dentry *devroot,
+                        struct pinctrl_dev *pctldev)
+{
+       debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+                           devroot, pctldev, &pinconf_pins_ops);
+       debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+                           devroot, pctldev, &pinconf_groups_ops);
+}
+
+#endif
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
new file mode 100644 (file)
index 0000000..e7dc616
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Internal interface between the core pin control system and the
+ * pin config portions
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifdef CONFIG_PINCONF
+
+int pinconf_check_ops(const struct pinconf_ops *ops);
+void pinconf_init_device_debugfs(struct dentry *devroot,
+                                struct pinctrl_dev *pctldev);
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+                          unsigned long *config);
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+                          unsigned long config);
+
+#else
+
+static inline int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+       return 0;
+}
+
+static inline void pinconf_init_device_debugfs(struct dentry *devroot,
+                                              struct pinctrl_dev *pctldev)
+{
+}
+
+#endif
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
new file mode 100644 (file)
index 0000000..69fb707
--- /dev/null
@@ -0,0 +1,938 @@
+/*
+ * U300 GPIO module.
+ *
+ * Copyright (C) 2007-2011 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * This can driver either of the two basic GPIO cores
+ * available in the U300 platforms:
+ * COH 901 335   - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
+ * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pinctrl/pinmux.h>
+#include <mach/gpio-u300.h>
+
+/*
+ * Bias modes for U300 GPIOs
+ *
+ * GPIO_U300_CONFIG_BIAS_UNKNOWN: this bias mode is not known to us
+ * GPIO_U300_CONFIG_BIAS_FLOAT: no specific bias, the GPIO will float or state
+ *     is not controlled by software
+ * GPIO_U300_CONFIG_BIAS_PULL_UP: the GPIO will be pulled up (usually with high
+ *     impedance to VDD)
+ */
+#define GPIO_U300_CONFIG_BIAS_UNKNOWN  0x1000
+#define GPIO_U300_CONFIG_BIAS_FLOAT    0x1001
+#define GPIO_U300_CONFIG_BIAS_PULL_UP  0x1002
+
+/*
+ * Drive modes for U300 GPIOs (output)
+ *
+ * GPIO_U300_CONFIG_DRIVE_PUSH_PULL: the GPIO will be driven actively high and
+ *     low, this is the most typical case and is typically achieved with two
+ *     active transistors on the output
+ * GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN: the GPIO will be driven with open drain
+ *     (open collector) which means it is usually wired with other output
+ *     ports which are then pulled up with an external resistor
+ * GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE: the GPIO will be driven with open drain
+ *     (open emitter) which is the same as open drain mutatis mutandis but
+ *     pulled to ground
+ */
+#define GPIO_U300_CONFIG_DRIVE_PUSH_PULL       0x2000
+#define GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN      0x2001
+#define GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE     0x2002
+
+/*
+ * Register definitions for COH 901 335 variant
+ */
+#define U300_335_PORT_STRIDE                           (0x1C)
+/* Port X Pin Data Register 32bit, this is both input and output (R/W) */
+#define U300_335_PXPDIR                                        (0x00)
+#define U300_335_PXPDOR                                        (0x00)
+/* Port X Pin Config Register 32bit (R/W) */
+#define U300_335_PXPCR                                 (0x04)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK             (0x0000FFFFUL)
+#define U300_GPIO_PXPCR_PIN_MODE_MASK                  (0x00000003UL)
+#define U300_GPIO_PXPCR_PIN_MODE_SHIFT                 (0x00000002UL)
+#define U300_GPIO_PXPCR_PIN_MODE_INPUT                 (0x00000000UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL      (0x00000001UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN     (0x00000002UL)
+#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE    (0x00000003UL)
+/* Port X Interrupt Event Register 32bit (R/W) */
+#define U300_335_PXIEV                                 (0x08)
+/* Port X Interrupt Enable Register 32bit (R/W) */
+#define U300_335_PXIEN                                 (0x0C)
+/* Port X Interrupt Force Register 32bit (R/W) */
+#define U300_335_PXIFR                                 (0x10)
+/* Port X Interrupt Config Register 32bit (R/W) */
+#define U300_335_PXICR                                 (0x14)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK            (0x000000FFUL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_MASK                        (0x00000001UL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE                (0x00000000UL)
+#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE         (0x00000001UL)
+/* Port X Pull-up Enable Register 32bit (R/W) */
+#define U300_335_PXPER                                 (0x18)
+/* This register layout is the same in both blocks */
+#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK       (0x000000FFUL)
+#define U300_GPIO_PXPER_PULL_UP_DISABLE                        (0x00000001UL)
+/* Control Register 32bit (R/W) */
+#define U300_335_CR                                    (0x54)
+#define U300_335_CR_BLOCK_CLOCK_ENABLE                 (0x00000001UL)
+
+/*
+ * Register definitions for COH 901 571 / 3 variant
+ */
+#define U300_571_PORT_STRIDE                           (0x30)
+/*
+ * Control Register 32bit (R/W)
+ * bit 15-9 (mask 0x0000FE00) contains the number of cores. 8*cores
+ * gives the number of GPIO pins.
+ * bit 8-2  (mask 0x000001FC) contains the core version ID.
+ */
+#define U300_571_CR                                    (0x00)
+#define U300_571_CR_SYNC_SEL_ENABLE                    (0x00000002UL)
+#define U300_571_CR_BLOCK_CLKRQ_ENABLE                 (0x00000001UL)
+/*
+ * These registers have the same layout and function as the corresponding
+ * COH 901 335 registers, just at different offset.
+ */
+#define U300_571_PXPDIR                                        (0x04)
+#define U300_571_PXPDOR                                        (0x08)
+#define U300_571_PXPCR                                 (0x0C)
+#define U300_571_PXPER                                 (0x10)
+#define U300_571_PXIEV                                 (0x14)
+#define U300_571_PXIEN                                 (0x18)
+#define U300_571_PXIFR                                 (0x1C)
+#define U300_571_PXICR                                 (0x20)
+
+/* 8 bits per port, no version has more than 7 ports */
+#define U300_GPIO_PINS_PER_PORT 8
+#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * 7)
+
+struct u300_gpio {
+       struct gpio_chip chip;
+       struct list_head port_list;
+       struct clk *clk;
+       struct resource *memres;
+       void __iomem *base;
+       struct device *dev;
+       int irq_base;
+       u32 stride;
+       /* Register offsets */
+       u32 pcr;
+       u32 dor;
+       u32 dir;
+       u32 per;
+       u32 icr;
+       u32 ien;
+       u32 iev;
+};
+
+struct u300_gpio_port {
+       struct list_head node;
+       struct u300_gpio *gpio;
+       char name[8];
+       int irq;
+       int number;
+       u8 toggle_edge_mode;
+};
+
+/*
+ * Macro to expand to read a specific register found in the "gpio"
+ * struct. It requires the struct u300_gpio *gpio variable to exist in
+ * its context. It calculates the port offset from the given pin
+ * offset, muliplies by the port stride and adds the register offset
+ * so it provides a pointer to the desired register.
+ */
+#define U300_PIN_REG(pin, reg) \
+       (gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
+
+/*
+ * Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
+ * register.
+ */
+#define U300_PIN_BIT(pin) \
+       (1 << (pin & 0x07))
+
+struct u300_gpio_confdata {
+       u16 bias_mode;
+       bool output;
+       int outval;
+};
+
+/* BS335 has seven ports of 8 bits each = GPIO pins 0..55 */
+#define BS335_GPIO_NUM_PORTS 7
+/* BS365 has five ports of 8 bits each = GPIO pins 0..39 */
+#define BS365_GPIO_NUM_PORTS 5
+
+#define U300_FLOATING_INPUT { \
+       .bias_mode = GPIO_U300_CONFIG_BIAS_FLOAT, \
+       .output = false, \
+}
+
+#define U300_PULL_UP_INPUT { \
+       .bias_mode = GPIO_U300_CONFIG_BIAS_PULL_UP, \
+       .output = false, \
+}
+
+#define U300_OUTPUT_LOW { \
+       .output = true, \
+       .outval = 0, \
+}
+
+#define U300_OUTPUT_HIGH { \
+       .output = true, \
+       .outval = 1, \
+}
+
+
+/* Initial configuration */
+static const struct u300_gpio_confdata __initconst
+bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
+       /* Port 0, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_HIGH,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+       },
+       /* Port 1, pins 0-7 */
+       {
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_PULL_UP_INPUT,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_HIGH,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+       },
+       /* Port 2, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_LOW,
+               U300_PULL_UP_INPUT,
+               U300_OUTPUT_LOW,
+               U300_PULL_UP_INPUT,
+       },
+       /* Port 3, pins 0-7 */
+       {
+               U300_PULL_UP_INPUT,
+               U300_OUTPUT_LOW,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+       },
+       /* Port 4, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+       },
+       /* Port 5, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+       },
+       /* Port 6, pind 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+       }
+};
+
+static const struct u300_gpio_confdata __initconst
+bs365_gpio_config[BS365_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
+       /* Port 0, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_LOW,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_PULL_UP_INPUT,
+               U300_FLOATING_INPUT,
+       },
+       /* Port 1, pins 0-7 */
+       {
+               U300_OUTPUT_LOW,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_LOW,
+               U300_FLOATING_INPUT,
+               U300_FLOATING_INPUT,
+               U300_OUTPUT_HIGH,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+       },
+       /* Port 2, pins 0-7 */
+       {
+               U300_FLOATING_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+       },
+       /* Port 3, pins 0-7 */
+       {
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+       },
+       /* Port 4, pins 0-7 */
+       {
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               U300_PULL_UP_INPUT,
+               /* These 4 pins doesn't exist on DB3210 */
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+               U300_OUTPUT_LOW,
+       }
+};
+
+/**
+ * to_u300_gpio() - get the pointer to u300_gpio
+ * @chip: the gpio chip member of the structure u300_gpio
+ */
+static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
+{
+       return container_of(chip, struct u300_gpio, chip);
+}
+
+static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+       /*
+        * Map back to global GPIO space and request muxing, the direction
+        * parameter does not matter for this controller.
+        */
+       int gpio = chip->base + offset;
+
+       return pinmux_request_gpio(gpio);
+}
+
+static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+       int gpio = chip->base + offset;
+
+       pinmux_free_gpio(gpio);
+}
+
+static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+
+       return readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset);
+}
+
+static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+       unsigned long flags;
+       u32 val;
+
+       local_irq_save(flags);
+
+       val = readl(U300_PIN_REG(offset, dor));
+       if (value)
+               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
+       else
+               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
+
+       local_irq_restore(flags);
+}
+
+static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+       unsigned long flags;
+       u32 val;
+
+       local_irq_save(flags);
+       val = readl(U300_PIN_REG(offset, pcr));
+       /* Mask out this pin, note 2 bits per setting */
+       val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
+       writel(val, U300_PIN_REG(offset, pcr));
+       local_irq_restore(flags);
+       return 0;
+}
+
+static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                     int value)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+       unsigned long flags;
+       u32 oldmode;
+       u32 val;
+
+       local_irq_save(flags);
+       val = readl(U300_PIN_REG(offset, pcr));
+       /*
+        * Drive mode must be set by the special mode set function, set
+        * push/pull mode by default if no mode has been selected.
+        */
+       oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
+                        ((offset & 0x07) << 1));
+       /* mode = 0 means input, else some mode is already set */
+       if (oldmode == 0) {
+               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
+                        ((offset & 0x07) << 1));
+               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+                       << ((offset & 0x07) << 1));
+               writel(val, U300_PIN_REG(offset, pcr));
+       }
+       u300_gpio_set(chip, offset, value);
+       local_irq_restore(flags);
+       return 0;
+}
+
+static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+       int retirq = gpio->irq_base + offset;
+
+       dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d\n", offset,
+               retirq);
+       return retirq;
+}
+
+static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
+                    u16 param, unsigned long *data)
+{
+       struct u300_gpio *gpio = to_u300_gpio(chip);
+       unsigned long flags;
+       u32 val;
+
+       local_irq_save(flags);
+       switch (param) {
+       case GPIO_U300_CONFIG_BIAS_UNKNOWN:
+       case GPIO_U300_CONFIG_BIAS_FLOAT:
+               val = readl(U300_PIN_REG(offset, per));
+               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
+               break;
+       case GPIO_U300_CONFIG_BIAS_PULL_UP:
+               val = readl(U300_PIN_REG(offset, per));
+               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
+               break;
+       case GPIO_U300_CONFIG_DRIVE_PUSH_PULL:
+               val = readl(U300_PIN_REG(offset, pcr));
+               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+                        << ((offset & 0x07) << 1));
+               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+                       << ((offset & 0x07) << 1));
+               writel(val, U300_PIN_REG(offset, pcr));
+               break;
+       case GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN:
+               val = readl(U300_PIN_REG(offset, pcr));
+               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+                        << ((offset & 0x07) << 1));
+               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
+                       << ((offset & 0x07) << 1));
+               writel(val, U300_PIN_REG(offset, pcr));
+               break;
+       case GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE:
+               val = readl(U300_PIN_REG(offset, pcr));
+               val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
+                        << ((offset & 0x07) << 1));
+               val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
+                       << ((offset & 0x07) << 1));
+               writel(val, U300_PIN_REG(offset, pcr));
+               break;
+       default:
+               local_irq_restore(flags);
+               dev_err(gpio->dev, "illegal configuration requested\n");
+               return -EINVAL;
+       }
+       local_irq_restore(flags);
+       return 0;
+}
+
+static struct gpio_chip u300_gpio_chip = {
+       .label                  = "u300-gpio-chip",
+       .owner                  = THIS_MODULE,
+       .request                = u300_gpio_request,
+       .free                   = u300_gpio_free,
+       .get                    = u300_gpio_get,
+       .set                    = u300_gpio_set,
+       .direction_input        = u300_gpio_direction_input,
+       .direction_output       = u300_gpio_direction_output,
+       .to_irq                 = u300_gpio_to_irq,
+};
+
+static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
+{
+       u32 val;
+
+       val = readl(U300_PIN_REG(offset, icr));
+       /* Set mode depending on state */
+       if (u300_gpio_get(&gpio->chip, offset)) {
+               /* High now, let's trigger on falling edge next then */
+               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+               dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
+                       offset);
+       } else {
+               /* Low now, let's trigger on rising edge next then */
+               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+               dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
+                       offset);
+       }
+}
+
+/*
+ * irq_set_type callback: program the ICR edge-select bit for the pin.
+ * Both-edges triggering is emulated via the toggle_edge_mode bitmask,
+ * flipped in u300_toggle_trigger() after every event. Previously any
+ * unsupported trigger (e.g. level) was silently accepted and left the
+ * hardware untouched; now it is rejected with -EINVAL as the genirq
+ * contract expects.
+ */
+static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
+{
+       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct u300_gpio *gpio = port->gpio;
+       int offset = d->irq - gpio->irq_base;
+       u32 val;
+
+       if ((trigger & IRQF_TRIGGER_RISING) &&
+           (trigger & IRQF_TRIGGER_FALLING)) {
+               /*
+                * The GPIO block can only trigger on falling OR rising edges,
+                * not both. So we need to toggle the mode whenever the pin
+                * goes from one state to the other with a special state flag
+                */
+               dev_dbg(gpio->dev,
+                       "trigger on both rising and falling edge on pin %d\n",
+                       offset);
+               port->toggle_edge_mode |= U300_PIN_BIT(offset);
+               u300_toggle_trigger(gpio, offset);
+       } else if (trigger & IRQF_TRIGGER_RISING) {
+               dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
+                       offset);
+               val = readl(U300_PIN_REG(offset, icr));
+               writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+               port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
+       } else if (trigger & IRQF_TRIGGER_FALLING) {
+               dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
+                       offset);
+               val = readl(U300_PIN_REG(offset, icr));
+               writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
+               port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
+       } else {
+               /* Only edge triggers are supported by this block */
+               dev_dbg(gpio->dev, "unsupported IRQ trigger on pin %d\n",
+                       offset);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Enable the IRQ for one pin by setting its bit in the port's IEN
+ * register. Local interrupts are disabled around the read-modify-write
+ * so an IRQ on this CPU cannot race the update.
+ */
+static void u300_gpio_irq_enable(struct irq_data *d)
+{
+       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct u300_gpio *gpio = port->gpio;
+       int offset = d->irq - gpio->irq_base;
+       u32 val;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       val = readl(U300_PIN_REG(offset, ien));
+       writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
+       local_irq_restore(flags);
+}
+
+/*
+ * Disable the IRQ for one pin by clearing its bit in the port's IEN
+ * register, mirroring u300_gpio_irq_enable().
+ */
+static void u300_gpio_irq_disable(struct irq_data *d)
+{
+       struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
+       struct u300_gpio *gpio = port->gpio;
+       int offset = d->irq - gpio->irq_base;
+       u32 val;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       val = readl(U300_PIN_REG(offset, ien));
+       writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
+       local_irq_restore(flags);
+}
+
+/*
+ * irq_chip for the per-pin GPIO interrupts. No ack/mask callbacks are
+ * provided here; the parent interrupt is acked and unmasked in the
+ * chained handler below.
+ */
+static struct irq_chip u300_gpio_irqchip = {
+       .name                   = "u300-gpio-irqchip",
+       .irq_enable             = u300_gpio_irq_enable,
+       .irq_disable            = u300_gpio_irq_disable,
+       .irq_set_type           = u300_gpio_irq_type,
+
+};
+
+/*
+ * Chained handler for one 8-pin port: ack the parent IRQ, read and
+ * clear the port's event (IEV) register, dispatch one nested IRQ per
+ * pending pin and finally unmask the parent again. Pins in
+ * toggle-edge mode get their trigger flipped so the opposite edge is
+ * caught next time.
+ */
+static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+       struct u300_gpio_port *port = irq_get_handler_data(irq);
+       struct u300_gpio *gpio = port->gpio;
+       int pinoffset = port->number << 3; /* get the right stride */
+       unsigned long val;
+
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
+       /* Read event register */
+       val = readl(U300_PIN_REG(pinoffset, iev));
+       /* Mask relevant bits */
+       val &= 0xFFU; /* 8 bits per port */
+       /* ACK IRQ (clear event) */
+       writel(val, U300_PIN_REG(pinoffset, iev));
+
+       /* Call IRQ handler */
+       if (val != 0) {
+               int irqoffset;
+
+               for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
+                       int pin_irq = gpio->irq_base + (port->number << 3)
+                               + irqoffset;
+                       int offset = pinoffset + irqoffset;
+
+                       dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
+                               pin_irq, offset);
+                       generic_handle_irq(pin_irq);
+                       /*
+                        * Triggering IRQ on both rising and falling edge
+                        * needs mockery
+                        */
+                       if (port->toggle_edge_mode & U300_PIN_BIT(offset))
+                               u300_toggle_trigger(gpio, offset);
+               }
+       }
+
+       desc->irq_data.chip->irq_unmask(&desc->irq_data);
+}
+
+/*
+ * Apply a board-supplied default configuration to one pin: outputs are
+ * driven to their default value with push/pull drive and bias disabled,
+ * inputs get the configured bias mode with the output latch forced low.
+ */
+static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
+                                     int offset,
+                                     const struct u300_gpio_confdata *conf)
+{
+       /* Set mode: input or output */
+       if (conf->output) {
+               u300_gpio_direction_output(&gpio->chip, offset, conf->outval);
+
+               /* Deactivate bias mode for output */
+               u300_gpio_config(&gpio->chip, offset,
+                                GPIO_U300_CONFIG_BIAS_FLOAT,
+                                NULL);
+
+               /* Set drive mode for output */
+               u300_gpio_config(&gpio->chip, offset,
+                                GPIO_U300_CONFIG_DRIVE_PUSH_PULL, NULL);
+
+               dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
+                       offset, conf->outval);
+       } else {
+               u300_gpio_direction_input(&gpio->chip, offset);
+
+               /* Always set output low on input pins */
+               u300_gpio_set(&gpio->chip, offset, 0);
+
+               /* Set bias mode for input */
+               u300_gpio_config(&gpio->chip, offset, conf->bias_mode, NULL);
+
+               dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
+                       offset, conf->bias_mode);
+       }
+}
+
+/*
+ * Walk every pin of every port on a COH 901 571/3 block and apply the
+ * per-variant board default configuration to it.
+ */
+static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio,
+                                    struct u300_gpio_platform *plat)
+{
+       int port, pin;
+
+       /* Write default config and values to all pins */
+       for (port = 0; port < plat->ports; port++) {
+               for (pin = 0; pin < 8; pin++) {
+                       const struct u300_gpio_confdata *conf;
+
+                       if (plat->variant == U300_GPIO_COH901571_3_BS335)
+                               conf = &bs335_gpio_config[port][pin];
+                       else if (plat->variant == U300_GPIO_COH901571_3_BS365)
+                               conf = &bs365_gpio_config[port][pin];
+                       else
+                               break;
+
+                       u300_gpio_init_pin(gpio, (port * 8) + pin, conf);
+               }
+       }
+}
+
+/*
+ * Release every port on the block's port list: unlink it, give back
+ * its parent IRQ and free the port structure itself.
+ */
+static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
+{
+       struct u300_gpio_port *port, *tmp;
+
+       list_for_each_entry_safe(port, tmp, &gpio->port_list, node) {
+               list_del(&port->node);
+               free_irq(port->irq, port);
+               kfree(port);
+       }
+}
+
+/*
+ * Probe one U300 GPIO block: map and clock the hardware, identify the
+ * block variant and its register layout, set up each 8-pin port with a
+ * chained parent IRQ plus per-pin irqchips, and finally register the
+ * gpiochip. Fix: the return value of platform_get_irq_byname() was
+ * never checked, so a missing IRQ resource would have been passed
+ * straight to irq_set_chained_handler().
+ */
+static int __init u300_gpio_probe(struct platform_device *pdev)
+{
+       struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
+       struct u300_gpio *gpio;
+       int err = 0;
+       int portno;
+       u32 val;
+       u32 ifr;
+       int i;
+
+       gpio = kzalloc(sizeof(struct u300_gpio), GFP_KERNEL);
+       if (gpio == NULL) {
+               dev_err(&pdev->dev, "failed to allocate memory\n");
+               return -ENOMEM;
+       }
+
+       /* Clone the chip template and fill in per-instance data */
+       gpio->chip = u300_gpio_chip;
+       gpio->chip.ngpio = plat->ports * U300_GPIO_PINS_PER_PORT;
+       gpio->irq_base = plat->gpio_irq_base;
+       gpio->chip.dev = &pdev->dev;
+       gpio->chip.base = plat->gpio_base;
+       gpio->dev = &pdev->dev;
+
+       /* Get GPIO clock */
+       gpio->clk = clk_get(gpio->dev, NULL);
+       if (IS_ERR(gpio->clk)) {
+               err = PTR_ERR(gpio->clk);
+               dev_err(gpio->dev, "could not get GPIO clock\n");
+               goto err_no_clk;
+       }
+       err = clk_enable(gpio->clk);
+       if (err) {
+               dev_err(gpio->dev, "could not enable GPIO clock\n");
+               goto err_no_clk_enable;
+       }
+
+       gpio->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!gpio->memres) {
+               dev_err(gpio->dev, "could not get GPIO memory resource\n");
+               err = -ENODEV;
+               goto err_no_resource;
+       }
+
+       if (!request_mem_region(gpio->memres->start,
+                               resource_size(gpio->memres),
+                               "GPIO Controller")) {
+               dev_err(gpio->dev, "could not request GPIO memory region\n");
+               err = -ENODEV;
+               goto err_no_ioregion;
+       }
+
+       gpio->base = ioremap(gpio->memres->start, resource_size(gpio->memres));
+       if (!gpio->base) {
+               dev_err(gpio->dev, "could not remap GPIO memory region\n");
+               err = -ENOMEM;
+               goto err_no_ioremap;
+       }
+
+       /* Set up the variant-specific register layout */
+       if (plat->variant == U300_GPIO_COH901335) {
+               dev_info(gpio->dev,
+                        "initializing GPIO Controller COH 901 335\n");
+               gpio->stride = U300_335_PORT_STRIDE;
+               gpio->pcr = U300_335_PXPCR;
+               gpio->dor = U300_335_PXPDOR;
+               gpio->dir = U300_335_PXPDIR;
+               gpio->per = U300_335_PXPER;
+               gpio->icr = U300_335_PXICR;
+               gpio->ien = U300_335_PXIEN;
+               gpio->iev = U300_335_PXIEV;
+               ifr = U300_335_PXIFR;
+
+               /* Turn on the GPIO block */
+               writel(U300_335_CR_BLOCK_CLOCK_ENABLE,
+                      gpio->base + U300_335_CR);
+       } else if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
+                  plat->variant == U300_GPIO_COH901571_3_BS365) {
+               dev_info(gpio->dev,
+                        "initializing GPIO Controller COH 901 571/3\n");
+               gpio->stride = U300_571_PORT_STRIDE;
+               gpio->pcr = U300_571_PXPCR;
+               gpio->dor = U300_571_PXPDOR;
+               gpio->dir = U300_571_PXPDIR;
+               gpio->per = U300_571_PXPER;
+               gpio->icr = U300_571_PXICR;
+               gpio->ien = U300_571_PXIEN;
+               gpio->iev = U300_571_PXIEV;
+               ifr = U300_571_PXIFR;
+
+               val = readl(gpio->base + U300_571_CR);
+               dev_info(gpio->dev, "COH901571/3 block version: %d, "
+                        "number of cores: %d totalling %d pins\n",
+                        ((val & 0x000001FC) >> 2),
+                        ((val & 0x0000FE00) >> 9),
+                        ((val & 0x0000FE00) >> 9) * 8);
+               writel(U300_571_CR_BLOCK_CLKRQ_ENABLE,
+                      gpio->base + U300_571_CR);
+               u300_gpio_init_coh901571(gpio, plat);
+       } else {
+               dev_err(gpio->dev, "unknown block variant\n");
+               err = -ENODEV;
+               goto err_unknown_variant;
+       }
+
+       /* Add each port with its IRQ separately */
+       INIT_LIST_HEAD(&gpio->port_list);
+       for (portno = 0 ; portno < plat->ports; portno++) {
+               struct u300_gpio_port *port =
+                       kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);
+
+               if (!port) {
+                       dev_err(gpio->dev, "out of memory\n");
+                       err = -ENOMEM;
+                       goto err_no_port;
+               }
+
+               snprintf(port->name, 8, "gpio%d", portno);
+               port->number = portno;
+               port->gpio = gpio;
+
+               port->irq = platform_get_irq_byname(pdev,
+                                                   port->name);
+               if (port->irq < 0) {
+                       dev_err(gpio->dev, "no IRQ found for %s\n",
+                               port->name);
+                       err = port->irq;
+                       /* Not yet on the port list, free directly */
+                       kfree(port);
+                       goto err_no_port;
+               }
+
+               dev_dbg(gpio->dev, "register IRQ %d for %s\n", port->irq,
+                       port->name);
+
+               irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
+               irq_set_handler_data(port->irq, port);
+
+               /* For each GPIO pin set the unique IRQ handler */
+               for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
+                       int irqno = gpio->irq_base + (portno << 3) + i;
+
+                       dev_dbg(gpio->dev, "handler for IRQ %d on %s\n",
+                               irqno, port->name);
+                       irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
+                                                handle_simple_irq);
+                       set_irq_flags(irqno, IRQF_VALID);
+                       irq_set_chip_data(irqno, port);
+               }
+
+               /* Turns off irq force (test register) for this port */
+               writel(0x0, gpio->base + portno * gpio->stride + ifr);
+
+               list_add_tail(&port->node, &gpio->port_list);
+       }
+       dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+
+       err = gpiochip_add(&gpio->chip);
+       if (err) {
+               dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+               goto err_no_chip;
+       }
+
+       platform_set_drvdata(pdev, gpio);
+
+       return 0;
+
+err_no_chip:
+err_no_port:
+       u300_gpio_free_ports(gpio);
+err_unknown_variant:
+       iounmap(gpio->base);
+err_no_ioremap:
+       release_mem_region(gpio->memres->start, resource_size(gpio->memres));
+err_no_ioregion:
+err_no_resource:
+       clk_disable(gpio->clk);
+err_no_clk_enable:
+       clk_put(gpio->clk);
+err_no_clk:
+       kfree(gpio);
+       dev_info(&pdev->dev, "module ERROR:%d\n", err);
+       return err;
+}
+
+/*
+ * Tear down the GPIO block: disable the hardware, remove the gpiochip
+ * and release the IRQs, mappings and clocks acquired in probe.
+ * NOTE(review): if gpiochip_remove() fails we return with the block
+ * already disabled but the ports, mapping and clock still held --
+ * verify this partial-teardown behavior is intended.
+ */
+static int __exit u300_gpio_remove(struct platform_device *pdev)
+{
+       struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
+       struct u300_gpio *gpio = platform_get_drvdata(pdev);
+       int err;
+
+       /* Turn off the GPIO block */
+       if (plat->variant == U300_GPIO_COH901335)
+               writel(0x00000000U, gpio->base + U300_335_CR);
+       if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
+           plat->variant == U300_GPIO_COH901571_3_BS365)
+               writel(0x00000000U, gpio->base + U300_571_CR);
+
+       err = gpiochip_remove(&gpio->chip);
+       if (err < 0) {
+               dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
+               return err;
+       }
+       u300_gpio_free_ports(gpio);
+       iounmap(gpio->base);
+       release_mem_region(gpio->memres->start,
+                          resource_size(gpio->memres));
+       clk_disable(gpio->clk);
+       clk_put(gpio->clk);
+       platform_set_drvdata(pdev, NULL);
+       kfree(gpio);
+       return 0;
+}
+
+/*
+ * No .probe member: the __init probe routine is passed directly to
+ * platform_driver_probe() in u300_gpio_init() below.
+ */
+static struct platform_driver u300_gpio_driver = {
+       .driver         = {
+               .name   = "u300-gpio",
+       },
+       .remove         = __exit_p(u300_gpio_remove),
+};
+
+
+/* Register the driver and probe the (single) matching device */
+static int __init u300_gpio_init(void)
+{
+       return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
+}
+
+/* Unregister the driver on module unload */
+static void __exit u300_gpio_exit(void)
+{
+       platform_driver_unregister(&u300_gpio_driver);
+}
+
+arch_initcall(u300_gpio_init);
+module_exit(u300_gpio_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
new file mode 100644 (file)
index 0000000..6b3534c
--- /dev/null
@@ -0,0 +1,1218 @@
+/*
+ * pinmux driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/bitops.h>
+
+#define DRIVER_NAME "pinmux-sirf"
+
+#define SIRFSOC_NUM_PADS    622
+/*
+ * Offset of the pad-enable register for pin group g; presumably one
+ * 0x100-sized register bank per group of 32 pads -- TODO confirm
+ */
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+/* Function mux register offset, presumably within rsc_virtbase -- verify */
+#define SIRFSOC_RSC_PIN_MUX 0x4
+
+/*
+ * pad list for the pinmux subsystem
+ * refer to CS-131858-DC-6A.xls
+ * NOTE(review): pad numbers are sparse (0-3, 12, 14, 70-77 etc. are
+ * absent); the muxmask tables below appear to address a pad as
+ * group = pad / 32, bit = pad % 32 -- confirm against the datasheet.
+ */
+static const struct pinctrl_pin_desc sirfsoc_pads[] = {
+       PINCTRL_PIN(4, "pwm0"),
+       PINCTRL_PIN(5, "pwm1"),
+       PINCTRL_PIN(6, "pwm2"),
+       PINCTRL_PIN(7, "pwm3"),
+       PINCTRL_PIN(8, "warm_rst_b"),
+       PINCTRL_PIN(9, "odo_0"),
+       PINCTRL_PIN(10, "odo_1"),
+       PINCTRL_PIN(11, "dr_dir"),
+       PINCTRL_PIN(13, "scl_1"),
+       PINCTRL_PIN(15, "sda_1"),
+       PINCTRL_PIN(16, "x_ldd[16]"),
+       PINCTRL_PIN(17, "x_ldd[17]"),
+       PINCTRL_PIN(18, "x_ldd[18]"),
+       PINCTRL_PIN(19, "x_ldd[19]"),
+       PINCTRL_PIN(20, "x_ldd[20]"),
+       PINCTRL_PIN(21, "x_ldd[21]"),
+       PINCTRL_PIN(22, "x_ldd[22]"),
+       PINCTRL_PIN(23, "x_ldd[23], lcdrom_frdy"),
+       PINCTRL_PIN(24, "gps_sgn"),
+       PINCTRL_PIN(25, "gps_mag"),
+       PINCTRL_PIN(26, "gps_clk"),
+       PINCTRL_PIN(27, "sd_cd_b_1"),
+       PINCTRL_PIN(28, "sd_vcc_on_1"),
+       PINCTRL_PIN(29, "sd_wp_b_1"),
+       PINCTRL_PIN(30, "sd_clk_3"),
+       PINCTRL_PIN(31, "sd_cmd_3"),
+
+       PINCTRL_PIN(32, "x_sd_dat_3[0]"),
+       PINCTRL_PIN(33, "x_sd_dat_3[1]"),
+       PINCTRL_PIN(34, "x_sd_dat_3[2]"),
+       PINCTRL_PIN(35, "x_sd_dat_3[3]"),
+       PINCTRL_PIN(36, "x_sd_clk_4"),
+       PINCTRL_PIN(37, "x_sd_cmd_4"),
+       PINCTRL_PIN(38, "x_sd_dat_4[0]"),
+       PINCTRL_PIN(39, "x_sd_dat_4[1]"),
+       PINCTRL_PIN(40, "x_sd_dat_4[2]"),
+       PINCTRL_PIN(41, "x_sd_dat_4[3]"),
+       PINCTRL_PIN(42, "x_cko_1"),
+       PINCTRL_PIN(43, "x_ac97_bit_clk"),
+       PINCTRL_PIN(44, "x_ac97_dout"),
+       PINCTRL_PIN(45, "x_ac97_din"),
+       PINCTRL_PIN(46, "x_ac97_sync"),
+       PINCTRL_PIN(47, "x_txd_1"),
+       PINCTRL_PIN(48, "x_txd_2"),
+       PINCTRL_PIN(49, "x_rxd_1"),
+       PINCTRL_PIN(50, "x_rxd_2"),
+       PINCTRL_PIN(51, "x_usclk_0"),
+       PINCTRL_PIN(52, "x_utxd_0"),
+       PINCTRL_PIN(53, "x_urxd_0"),
+       PINCTRL_PIN(54, "x_utfs_0"),
+       PINCTRL_PIN(55, "x_urfs_0"),
+       PINCTRL_PIN(56, "x_usclk_1"),
+       PINCTRL_PIN(57, "x_utxd_1"),
+       PINCTRL_PIN(58, "x_urxd_1"),
+       PINCTRL_PIN(59, "x_utfs_1"),
+       PINCTRL_PIN(60, "x_urfs_1"),
+       PINCTRL_PIN(61, "x_usclk_2"),
+       PINCTRL_PIN(62, "x_utxd_2"),
+       PINCTRL_PIN(63, "x_urxd_2"),
+
+       PINCTRL_PIN(64, "x_utfs_2"),
+       PINCTRL_PIN(65, "x_urfs_2"),
+       PINCTRL_PIN(66, "x_df_we_b"),
+       PINCTRL_PIN(67, "x_df_re_b"),
+       PINCTRL_PIN(68, "x_txd_0"),
+       PINCTRL_PIN(69, "x_rxd_0"),
+       PINCTRL_PIN(78, "x_cko_0"),
+       PINCTRL_PIN(79, "x_vip_pxd[7]"),
+       PINCTRL_PIN(80, "x_vip_pxd[6]"),
+       PINCTRL_PIN(81, "x_vip_pxd[5]"),
+       PINCTRL_PIN(82, "x_vip_pxd[4]"),
+       PINCTRL_PIN(83, "x_vip_pxd[3]"),
+       PINCTRL_PIN(84, "x_vip_pxd[2]"),
+       PINCTRL_PIN(85, "x_vip_pxd[1]"),
+       PINCTRL_PIN(86, "x_vip_pxd[0]"),
+       PINCTRL_PIN(87, "x_vip_vsync"),
+       PINCTRL_PIN(88, "x_vip_hsync"),
+       PINCTRL_PIN(89, "x_vip_pxclk"),
+       PINCTRL_PIN(90, "x_sda_0"),
+       PINCTRL_PIN(91, "x_scl_0"),
+       PINCTRL_PIN(92, "x_df_ry_by"),
+       PINCTRL_PIN(93, "x_df_cs_b[1]"),
+       PINCTRL_PIN(94, "x_df_cs_b[0]"),
+       PINCTRL_PIN(95, "x_l_pclk"),
+
+       PINCTRL_PIN(96, "x_l_lck"),
+       PINCTRL_PIN(97, "x_l_fck"),
+       PINCTRL_PIN(98, "x_l_de"),
+       PINCTRL_PIN(99, "x_ldd[0]"),
+       PINCTRL_PIN(100, "x_ldd[1]"),
+       PINCTRL_PIN(101, "x_ldd[2]"),
+       PINCTRL_PIN(102, "x_ldd[3]"),
+       PINCTRL_PIN(103, "x_ldd[4]"),
+       PINCTRL_PIN(104, "x_ldd[5]"),
+       PINCTRL_PIN(105, "x_ldd[6]"),
+       PINCTRL_PIN(106, "x_ldd[7]"),
+       PINCTRL_PIN(107, "x_ldd[8]"),
+       PINCTRL_PIN(108, "x_ldd[9]"),
+       PINCTRL_PIN(109, "x_ldd[10]"),
+       PINCTRL_PIN(110, "x_ldd[11]"),
+       PINCTRL_PIN(111, "x_ldd[12]"),
+       PINCTRL_PIN(112, "x_ldd[13]"),
+       PINCTRL_PIN(113, "x_ldd[14]"),
+       PINCTRL_PIN(114, "x_ldd[15]"),
+};
+
+/**
+ * struct sirfsoc_pmx - state container for the SiRFprimaII pinmux driver
+ * @dev: a pointer back to containing device
+ * @pmx: the pinctrl device registered for this pinmux
+ * @gpio_virtbase: ioremapped base of the GPIO pad-enable registers
+ * @rsc_virtbase: ioremapped base of the RSC (function mux) registers
+ */
+struct sirfsoc_pmx {
+       struct device *dev;
+       struct pinctrl_dev *pmx;
+       void __iomem *gpio_virtbase;
+       void __iomem *rsc_virtbase;
+};
+
+/* SIRFSOC_GPIO_PAD_EN set */
+struct sirfsoc_muxmask {
+       /* index of the 32-pad group this mask applies to */
+       unsigned long group;
+       /* bitmask of pads within the group claimed by the function */
+       unsigned long mask;
+};
+
+struct sirfsoc_padmux {
+       /* number of entries in @muxmask */
+       unsigned long muxmask_counts;
+       const struct sirfsoc_muxmask *muxmask;
+       /* RSC_PIN_MUX set */
+       /* presumably: bits of the RSC mux register affected (funcmask)
+        * and the value to program into them (funcval) -- verify against
+        * the set_mux implementation */
+       unsigned long funcmask;
+       unsigned long funcval;
+};
+
+/**
+ * struct sirfsoc_pin_group - describes a SiRFprimaII pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ *     from the driver-local pin enumeration space
+ * @num_pins: the number of pins in this group array, i.e. the number of
+ *     elements in .pins so we can iterate over that array
+ */
+struct sirfsoc_pin_group {
+       const char *name;
+       const unsigned int *pins;
+       const unsigned num_pins;
+};
+
+/*
+ * LCD interface groups. All variants share the group 3 data/control
+ * mask and group 2 bit 31 (pad 95, x_l_pclk); the 18/24-bit and lcdrom
+ * variants add further x_ldd lines from group 0.
+ */
+static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
+       {
+               .group = 3,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+                       BIT(17) | BIT(18),
+       }, {
+               .group = 2,
+               .mask = BIT(31),
+       },
+};
+
+static const struct sirfsoc_padmux lcd_16bits_padmux = {
+       .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
+       .muxmask = lcd_16bits_sirfsoc_muxmask,
+       .funcmask = BIT(4),
+       .funcval = 0,
+};
+
+static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
+       {
+               .group = 3,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+                       BIT(17) | BIT(18),
+       }, {
+               .group = 2,
+               .mask = BIT(31),
+       }, {
+               .group = 0,
+               .mask = BIT(16) | BIT(17),
+       },
+};
+
+static const struct sirfsoc_padmux lcd_18bits_padmux = {
+       .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
+       .muxmask = lcd_18bits_muxmask,
+       .funcmask = BIT(4),
+       .funcval = 0,
+};
+
+static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+       105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
+
+static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
+       {
+               .group = 3,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+                       BIT(17) | BIT(18),
+       }, {
+               .group = 2,
+               .mask = BIT(31),
+       }, {
+               .group = 0,
+               .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+       },
+};
+
+static const struct sirfsoc_padmux lcd_24bits_padmux = {
+       .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
+       .muxmask = lcd_24bits_muxmask,
+       .funcmask = BIT(4),
+       .funcval = 0,
+};
+
+static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
+       {
+               .group = 3,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
+                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+                       BIT(17) | BIT(18),
+       }, {
+               .group = 2,
+               .mask = BIT(31),
+       }, {
+               .group = 0,
+               .mask = BIT(23),
+       },
+};
+
+static const struct sirfsoc_padmux lcdrom_padmux = {
+       .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
+       .muxmask = lcdrom_muxmask,
+       .funcmask = BIT(4),
+       .funcval = BIT(4),
+};
+
+static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
+
+/*
+ * UART0: group 2 bits 4/5 are pads 68/69 (x_txd_0/x_rxd_0); group 1
+ * bits 23/28 are pads 55/60, presumably the stream-control lines (cf.
+ * the nostreamctrl variant below) -- verify.
+ */
+static const struct sirfsoc_muxmask uart0_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(4) | BIT(5),
+       }, {
+               .group = 1,
+               .mask = BIT(23) | BIT(28),
+       },
+};
+
+static const struct sirfsoc_padmux uart0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(uart0_muxmask),
+       .muxmask = uart0_muxmask,
+       .funcmask = BIT(9),
+       .funcval = BIT(9),
+};
+
+static const unsigned uart0_pins[] = { 55, 60, 68, 69 };
+
+static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(4) | BIT(5),
+       },
+};
+
+static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = {
+       .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask),
+       .muxmask = uart0_nostreamctrl_muxmask,
+};
+
+/*
+ * Group 2 bits 4 and 5 are pads 68 (x_txd_0) and 69 (x_rxd_0), matching
+ * the uart0 group; pad 39 was a typo (it is x_sd_dat_4[1] and is not in
+ * the muxmask above).
+ */
+static const unsigned uart0_nostreamctrl_pins[] = { 68, 69 };
+
+/*
+ * UART1/UART2 RX/TX groups. The full uart2 group additionally claims
+ * pads 56/59 (USP1 pads), presumably for stream control -- cf. the
+ * nostreamctrl variant below.
+ */
+static const struct sirfsoc_muxmask uart1_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(15) | BIT(17),
+       },
+};
+
+static const struct sirfsoc_padmux uart1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(uart1_muxmask),
+       .muxmask = uart1_muxmask,
+};
+
+static const unsigned uart1_pins[] = { 47, 49 };
+
+static const struct sirfsoc_muxmask uart2_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(16) | BIT(18) | BIT(24) | BIT(27),
+       },
+};
+
+static const struct sirfsoc_padmux uart2_padmux = {
+       .muxmask_counts = ARRAY_SIZE(uart2_muxmask),
+       .muxmask = uart2_muxmask,
+       .funcmask = BIT(10),
+       .funcval = BIT(10),
+};
+
+static const unsigned uart2_pins[] = { 48, 50, 56, 59 };
+
+static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(16) | BIT(18),
+       },
+};
+
+static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = {
+       .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask),
+       .muxmask = uart2_nostreamctrl_muxmask,
+};
+
+static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 };
+
+/*
+ * sdmmc3 and spi0 share group 1 bits 0-3 (pads 32-35); RSC function
+ * bit 7 selects between them (0 = sdmmc3, 1 = spi0).
+ */
+static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(30) | BIT(31),
+       }, {
+               .group = 1,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
+       },
+};
+
+static const struct sirfsoc_padmux sdmmc3_padmux = {
+       .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
+       .muxmask = sdmmc3_muxmask,
+       .funcmask = BIT(7),
+       .funcval = 0,
+};
+
+static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 };
+
+static const struct sirfsoc_muxmask spi0_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
+       },
+};
+
+static const struct sirfsoc_padmux spi0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(spi0_muxmask),
+       .muxmask = spi0_muxmask,
+       .funcmask = BIT(7),
+       .funcval = BIT(7),
+};
+
+static const unsigned spi0_pins[] = { 32, 33, 34, 35 };
+
+static const struct sirfsoc_muxmask sdmmc4_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9),
+       },
+};
+
+static const struct sirfsoc_padmux sdmmc4_padmux = {
+       .muxmask_counts = ARRAY_SIZE(sdmmc4_muxmask),
+       .muxmask = sdmmc4_muxmask,
+};
+
+static const unsigned sdmmc4_pins[] = { 36, 37, 38, 39, 40, 41 };
+
+/*
+ * cko1 and i2s both claim group 1 bit 10 (pad 42, x_cko_1); RSC
+ * function bit 3 selects between them.
+ */
+static const struct sirfsoc_muxmask cko1_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(10),
+       },
+};
+
+static const struct sirfsoc_padmux cko1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(cko1_muxmask),
+       .muxmask = cko1_muxmask,
+       .funcmask = BIT(3),
+       .funcval = 0,
+};
+
+static const unsigned cko1_pins[] = { 42 };
+
+static const struct sirfsoc_muxmask i2s_muxmask[] = {
+       {
+               .group = 1,
+               .mask =
+                       BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19)
+                               | BIT(23) | BIT(28),
+       },
+};
+
+static const struct sirfsoc_padmux i2s_padmux = {
+       .muxmask_counts = ARRAY_SIZE(i2s_muxmask),
+       .muxmask = i2s_muxmask,
+       .funcmask = BIT(3) | BIT(9),
+       .funcval = BIT(3),
+};
+
+static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 };
+
+static const struct sirfsoc_muxmask ac97_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
+       },
+};
+
+static const struct sirfsoc_padmux ac97_padmux = {
+       .muxmask_counts = ARRAY_SIZE(ac97_muxmask),
+       .muxmask = ac97_muxmask,
+       .funcmask = BIT(8),
+       .funcval = 0,
+};
+
+/*
+ * Group 1 bits 11-14 are pads 43-46, the x_ac97_bit_clk/dout/din/sync
+ * pads; the previous values 33-36 were SD/MMC pads and did not match
+ * the muxmask above (cf. spi1, which shares this mask with pads 43-46).
+ */
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
+
+/*
+ * spi1 claims group 1 bits 11-14 (pads 43-46), selected via RSC
+ * function bit 8. gps and sdmmc5 both claim group 0 bits 24-26
+ * (pads 24-26) and are distinguished by RSC bits 12-14.
+ */
+static const struct sirfsoc_muxmask spi1_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
+       },
+};
+
+static const struct sirfsoc_padmux spi1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(spi1_muxmask),
+       .muxmask = spi1_muxmask,
+       .funcmask = BIT(8),
+       .funcval = BIT(8),
+};
+
+static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
+
+static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(27) | BIT(28) | BIT(29),
+       },
+};
+
+static const struct sirfsoc_padmux sdmmc1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
+       .muxmask = sdmmc1_muxmask,
+};
+
+static const unsigned sdmmc1_pins[] = { 27, 28, 29 };
+
+static const struct sirfsoc_muxmask gps_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(24) | BIT(25) | BIT(26),
+       },
+};
+
+static const struct sirfsoc_padmux gps_padmux = {
+       .muxmask_counts = ARRAY_SIZE(gps_muxmask),
+       .muxmask = gps_muxmask,
+       .funcmask = BIT(12) | BIT(13) | BIT(14),
+       .funcval = BIT(12),
+};
+
+static const unsigned gps_pins[] = { 24, 25, 26 };
+
+static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(24) | BIT(25) | BIT(26),
+       }, {
+               .group = 1,
+               .mask = BIT(29),
+       }, {
+               .group = 2,
+               .mask = BIT(0) | BIT(1),
+       },
+};
+
+static const struct sirfsoc_padmux sdmmc5_padmux = {
+       .muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
+       .muxmask = sdmmc5_muxmask,
+       .funcmask = BIT(13) | BIT(14),
+       .funcval = BIT(13) | BIT(14),
+};
+
+static const unsigned sdmmc5_pins[] = { 24, 25, 26, 61, 64, 65 };
+
+static const struct sirfsoc_muxmask usp0_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
+       },
+};
+
+static const struct sirfsoc_padmux usp0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usp0_muxmask),
+       .muxmask = usp0_muxmask,
+       .funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9),
+       .funcval = 0,
+};
+
+static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+
+/*
+ * USP1 (group 1 bits 24-28, pads 56-60) and USP2 (group 1 bits 29-31
+ * plus group 2 bits 0-1, pads 61-65).
+ */
+static const struct sirfsoc_muxmask usp1_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28),
+       },
+};
+
+static const struct sirfsoc_padmux usp1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usp1_muxmask),
+       .muxmask = usp1_muxmask,
+       .funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11),
+       .funcval = 0,
+};
+
+static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 };
+
+static const struct sirfsoc_muxmask usp2_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(29) | BIT(30) | BIT(31),
+       }, {
+               .group = 2,
+               .mask = BIT(0) | BIT(1),
+       },
+};
+
+static const struct sirfsoc_padmux usp2_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usp2_muxmask),
+       .muxmask = usp2_muxmask,
+       .funcmask = BIT(13) | BIT(14),
+       .funcval = 0,
+};
+
+static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 };
+
+static const struct sirfsoc_muxmask nand_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30),
+       },
+};
+
+static const struct sirfsoc_padmux nand_padmux = {
+       .muxmask_counts = ARRAY_SIZE(nand_muxmask),
+       .muxmask = nand_muxmask,
+       .funcmask = BIT(5),
+       .funcval = 0,
+};
+
+/*
+ * Group 2 bits 2/3 are pads 66/67 (x_df_we_b/x_df_re_b) and bits
+ * 28-30 are pads 92-94 (x_df_ry_by, x_df_cs_b[1:0]); the previous
+ * values 64/65 were the USP2 frame-sync pads and did not match the
+ * muxmask above.
+ */
+static const unsigned nand_pins[] = { 66, 67, 92, 93, 94 };
+
+/*
+ * sdmmc0 claims no pad-enable bits at all, only RSC function bit 5
+ * (shared with nand/sdmmc2 as the selector).
+ */
+static const struct sirfsoc_padmux sdmmc0_padmux = {
+       .muxmask_counts = 0,
+       .funcmask = BIT(5),
+       .funcval = 0,
+};
+
+static const unsigned sdmmc0_pins[] = { };
+
+static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(2) | BIT(3),
+       },
+};
+
+static const struct sirfsoc_padmux sdmmc2_padmux = {
+       .muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
+       .muxmask = sdmmc2_muxmask,
+       .funcmask = BIT(5),
+       .funcval = BIT(5),
+};
+
+static const unsigned sdmmc2_pins[] = { 66, 67 };
+
+static const struct sirfsoc_muxmask cko0_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(14),
+       },
+};
+
+static const struct sirfsoc_padmux cko0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(cko0_muxmask),
+       .muxmask = cko0_muxmask,
+};
+
+static const unsigned cko0_pins[] = { 78 };
+
+static const struct sirfsoc_muxmask vip_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
+                       | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
+                       BIT(25),
+       },
+};
+
+static const struct sirfsoc_padmux vip_padmux = {
+       .muxmask_counts = ARRAY_SIZE(vip_muxmask),
+       .muxmask = vip_muxmask,
+       .funcmask = BIT(0),
+       .funcval = 0,
+};
+
+static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+
+static const struct sirfsoc_muxmask i2c0_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(26) | BIT(27),
+       },
+};
+
+static const struct sirfsoc_padmux i2c0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(i2c0_muxmask),
+       .muxmask = i2c0_muxmask,
+};
+
+static const unsigned i2c0_pins[] = { 90, 91 };
+
+static const struct sirfsoc_muxmask i2c1_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(13) | BIT(15),
+       },
+};
+
+static const struct sirfsoc_padmux i2c1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
+       .muxmask = i2c1_muxmask,
+};
+
+static const unsigned i2c1_pins[] = { 13, 15 };
+
+static const struct sirfsoc_muxmask viprom_muxmask[] = {
+       {
+               .group = 2,
+               .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
+                       | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
+                       BIT(25),
+       }, {
+               .group = 0,
+               .mask = BIT(12),
+       },
+};
+
+static const struct sirfsoc_padmux viprom_padmux = {
+       .muxmask_counts = ARRAY_SIZE(viprom_muxmask),
+       .muxmask = viprom_muxmask,
+       .funcmask = BIT(0),
+       .funcval = BIT(0),
+};
+
+static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
+
+static const struct sirfsoc_muxmask pwm0_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(4),
+       },
+};
+
+static const struct sirfsoc_padmux pwm0_padmux = {
+       .muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
+       .muxmask = pwm0_muxmask,
+       .funcmask = BIT(12),
+       .funcval = 0,
+};
+
+static const unsigned pwm0_pins[] = { 4 };
+
+static const struct sirfsoc_muxmask pwm1_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(5),
+       },
+};
+
+static const struct sirfsoc_padmux pwm1_padmux = {
+       .muxmask_counts = ARRAY_SIZE(pwm1_muxmask),
+       .muxmask = pwm1_muxmask,
+};
+
+static const unsigned pwm1_pins[] = { 5 };
+
+static const struct sirfsoc_muxmask pwm2_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(6),
+       },
+};
+
+static const struct sirfsoc_padmux pwm2_padmux = {
+       .muxmask_counts = ARRAY_SIZE(pwm2_muxmask),
+       .muxmask = pwm2_muxmask,
+};
+
+static const unsigned pwm2_pins[] = { 6 };
+
+static const struct sirfsoc_muxmask pwm3_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(7),
+       },
+};
+
+static const struct sirfsoc_padmux pwm3_padmux = {
+       .muxmask_counts = ARRAY_SIZE(pwm3_muxmask),
+       .muxmask = pwm3_muxmask,
+};
+
+static const unsigned pwm3_pins[] = { 7 };
+
+static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(8),
+       },
+};
+
+static const struct sirfsoc_padmux warm_rst_padmux = {
+       .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
+       .muxmask = warm_rst_muxmask,
+};
+
+static const unsigned warm_rst_pins[] = { 8 };
+
+static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(22),
+       },
+};
+static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask),
+       .muxmask = usb0_utmi_drvbus_muxmask,
+       .funcmask = BIT(6),
+       .funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */
+};
+
+static const unsigned usb0_utmi_drvbus_pins[] = { 54 };
+
+static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(27),
+       },
+};
+
+static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
+       .muxmask = usb1_utmi_drvbus_muxmask,
+       .funcmask = BIT(11),
+       .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
+};
+
+static const unsigned usb1_utmi_drvbus_pins[] = { 59 };
+
+static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
+       {
+               .group = 0,
+               .mask = BIT(9) | BIT(10) | BIT(11),
+       },
+};
+
+static const struct sirfsoc_padmux pulse_count_padmux = {
+       .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask),
+       .muxmask = pulse_count_muxmask,
+};
+
+static const unsigned pulse_count_pins[] = { 9, 10, 11 };
+
+/* Initializer for one sirfsoc_pin_group entry: name plus its pin list. */
+#define SIRFSOC_PIN_GROUP(n, p)  \
+       {                       \
+               .name = n,      \
+               .pins = p,      \
+               .num_pins = ARRAY_SIZE(p),      \
+       }
+
+/*
+ * All selectable pin groups.  The group names registered here are the
+ * canonical ones; the per-function group lists below must use exactly
+ * these strings for the pinctrl core to resolve them.
+ */
+static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
+       SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins),
+       SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins),
+       SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
+       SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
+       SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
+       SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
+       SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
+       SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
+       SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
+       SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
+       SIRFSOC_PIN_GROUP("usp2grp", usp2_pins),
+       SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
+       SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
+       SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
+       SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins),
+       SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins),
+       SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins),
+       SIRFSOC_PIN_GROUP("vipgrp", vip_pins),
+       SIRFSOC_PIN_GROUP("vipromgrp", viprom_pins),
+       SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins),
+       SIRFSOC_PIN_GROUP("cko0_rstgrp", cko0_pins),
+       SIRFSOC_PIN_GROUP("cko1_rstgrp", cko1_pins),
+       SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins),
+       SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins),
+       SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins),
+       SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins),
+       SIRFSOC_PIN_GROUP("sdmmc4grp", sdmmc4_pins),
+       SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
+       SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins),
+       SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
+       SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
+       SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
+       SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
+       SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
+       SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
+       SIRFSOC_PIN_GROUP("spi1grp", spi1_pins),
+       SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
+};
+
+/* pinctrl_ops.list_groups: 0 while @selector names a valid group. */
+static int sirfsoc_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+       return selector < ARRAY_SIZE(sirfsoc_pin_groups) ? 0 : -EINVAL;
+}
+
+/* pinctrl_ops.get_group_name: group name, or NULL when out of range. */
+static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
+                                      unsigned selector)
+{
+       return selector < ARRAY_SIZE(sirfsoc_pin_groups) ?
+               sirfsoc_pin_groups[selector].name : NULL;
+}
+
+/* pinctrl_ops.get_group_pins: hand out a group's pad list and count. */
+static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+                              const unsigned **pins,
+                              unsigned *num_pins)
+{
+       const struct sirfsoc_pin_group *grp;
+
+       if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
+               return -EINVAL;
+
+       grp = &sirfsoc_pin_groups[selector];
+       *pins = grp->pins;
+       *num_pins = grp->num_pins;
+       return 0;
+}
+
+/* pinctrl_ops.pin_dbg_show: tag each pin with this driver's name in debugfs. */
+static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+                  unsigned offset)
+{
+       seq_printf(s, " " DRIVER_NAME);
+}
+
+/* Pin-control callbacks handed to the pinctrl core. */
+static struct pinctrl_ops sirfsoc_pctrl_ops = {
+       .list_groups = sirfsoc_list_groups,
+       .get_group_name = sirfsoc_get_group_name,
+       .get_group_pins = sirfsoc_get_group_pins,
+       .pin_dbg_show = sirfsoc_pin_dbg_show,
+};
+
+/*
+ * One muxable function: its exposed name, the pin groups it can use,
+ * and the pad/RSC register settings (padmux) that select it.
+ */
+struct sirfsoc_pmx_func {
+       const char *name;
+       const char * const *groups;
+       const unsigned num_groups;
+       const struct sirfsoc_padmux *padmux;
+};
+
+/*
+ * Per-function group lists.  Each string must match a name registered in
+ * sirfsoc_pin_groups[] exactly, otherwise the pinctrl core cannot resolve
+ * the group when the function is activated.  Fixed: "lcdromgrp",
+ * "cko0grp" and "cko1grp" did not match the registered "lcdrom_grp",
+ * "cko0_rstgrp" and "cko1_rstgrp" names.
+ */
+static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" };
+static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
+static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
+static const char * const lcdromgrp[] = { "lcdrom_grp" };
+static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart1grp[] = { "uart1grp" };
+static const char * const uart2grp[] = { "uart2grp" };
+static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
+static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp1grp[] = { "usp1grp" };
+static const char * const usp2grp[] = { "usp2grp" };
+static const char * const i2c0grp[] = { "i2c0grp" };
+static const char * const i2c1grp[] = { "i2c1grp" };
+static const char * const pwm0grp[] = { "pwm0grp" };
+static const char * const pwm1grp[] = { "pwm1grp" };
+static const char * const pwm2grp[] = { "pwm2grp" };
+static const char * const pwm3grp[] = { "pwm3grp" };
+static const char * const vipgrp[] = { "vipgrp" };
+static const char * const vipromgrp[] = { "vipromgrp" };
+static const char * const warm_rstgrp[] = { "warm_rstgrp" };
+static const char * const cko0grp[] = { "cko0_rstgrp" };
+static const char * const cko1grp[] = { "cko1_rstgrp" };
+static const char * const sdmmc0grp[] = { "sdmmc0grp" };
+static const char * const sdmmc1grp[] = { "sdmmc1grp" };
+static const char * const sdmmc2grp[] = { "sdmmc2grp" };
+static const char * const sdmmc3grp[] = { "sdmmc3grp" };
+static const char * const sdmmc4grp[] = { "sdmmc4grp" };
+static const char * const sdmmc5grp[] = { "sdmmc5grp" };
+static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
+static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
+static const char * const pulse_countgrp[] = { "pulse_countgrp" };
+static const char * const i2sgrp[] = { "i2sgrp" };
+static const char * const ac97grp[] = { "ac97grp" };
+static const char * const nandgrp[] = { "nandgrp" };
+static const char * const spi0grp[] = { "spi0grp" };
+static const char * const spi1grp[] = { "spi1grp" };
+static const char * const gpsgrp[] = { "gpsgrp" };
+
+/* Initializer for one sirfsoc_pmx_func entry: name, group list, padmux. */
+#define SIRFSOC_PMX_FUNCTION(n, g, m)          \
+       {                                       \
+               .name = n,                      \
+               .groups = g,                    \
+               .num_groups = ARRAY_SIZE(g),    \
+               .padmux = &m,                   \
+       }
+
+/* All muxable functions exposed by this controller. */
+static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
+       SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux),
+       SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux),
+       SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
+       SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
+       SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
+       SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
+       SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
+       SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
+       SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
+       SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
+       SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux),
+       SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
+       SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
+       SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
+       SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux),
+       SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux),
+       SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux),
+       SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux),
+       SIRFSOC_PMX_FUNCTION("viprom", vipromgrp, viprom_padmux),
+       SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux),
+       SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux),
+       SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux),
+       SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
+       SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
+       SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
+       SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
+       SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
+       SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
+       SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
+       SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
+       SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux),
+       SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux),
+};
+
+/*
+ * Apply (or revert) the pad routing for function @selector.  Clearing a
+ * bit in a bank's PAD_EN register takes the pad away from GPIO; when
+ * enabling, the RSC pin-mux register additionally gets the function's
+ * funcmask bits replaced by funcval.
+ */
+static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector,
+       bool enable)
+{
+       const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux;
+       int i;
+
+       for (i = 0; i < mux->muxmask_counts; i++) {
+               const struct sirfsoc_muxmask *m = &mux->muxmask[i];
+               u32 val;
+
+               val = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(m->group));
+               val = enable ? (val & ~m->mask) : (val | m->mask);
+               writel(val, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(m->group));
+       }
+
+       if (mux->funcmask && enable) {
+               u32 func;
+
+               func = readl(spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+               func = (func & ~mux->funcmask) | mux->funcval;
+               writel(func, spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
+       }
+}
+
+/* pinmux_ops.enable: route the pads of @selector to its peripheral. */
+static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector,
+       unsigned group)
+{
+       struct sirfsoc_pmx *spmx = pinctrl_dev_get_drvdata(pmxdev);
+
+       sirfsoc_pinmux_endisable(spmx, selector, true);
+       return 0;
+}
+
+/* pinmux_ops.disable: hand the pads of @selector back to GPIO. */
+static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector,
+       unsigned group)
+{
+       struct sirfsoc_pmx *spmx = pinctrl_dev_get_drvdata(pmxdev);
+
+       sirfsoc_pinmux_endisable(spmx, selector, false);
+}
+
+/* pinmux_ops.list_functions: 0 while @selector names a valid function. */
+static int sirfsoc_pinmux_list_funcs(struct pinctrl_dev *pmxdev, unsigned selector)
+{
+       return selector < ARRAY_SIZE(sirfsoc_pmx_functions) ? 0 : -EINVAL;
+}
+
+/*
+ * pinmux_ops.get_function_name: name of function @selector, or NULL when
+ * out of range.  The bounds check mirrors sirfsoc_get_group_name() and
+ * avoids an out-of-bounds read on a bad selector.
+ */
+static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
+                                         unsigned selector)
+{
+       if (selector >= ARRAY_SIZE(sirfsoc_pmx_functions))
+               return NULL;
+       return sirfsoc_pmx_functions[selector].name;
+}
+
+/*
+ * pinmux_ops.get_function_groups: report the groups usable by function
+ * @selector.  Added the same selector bounds check as the other ops so a
+ * bad selector returns -EINVAL instead of reading past the table.
+ */
+static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+                              const char * const **groups,
+                              unsigned * const num_groups)
+{
+       if (selector >= ARRAY_SIZE(sirfsoc_pmx_functions))
+               return -EINVAL;
+       *groups = sirfsoc_pmx_functions[selector].groups;
+       *num_groups = sirfsoc_pmx_functions[selector].num_groups;
+       return 0;
+}
+
+/*
+ * pinmux_ops.gpio_request_enable: give one pad back to the GPIO block by
+ * setting its bit in the bank's PAD_EN register.  range->id doubles as
+ * the pad-enable group (bank) index; (offset - pin_base) is the bit
+ * within that bank.
+ */
+static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
+       struct pinctrl_gpio_range *range, unsigned offset)
+{
+       struct sirfsoc_pmx *spmx;
+
+       int group = range->id;
+
+       u32 muxval;
+
+       spmx = pinctrl_dev_get_drvdata(pmxdev);
+
+       muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+       muxval = muxval | (1 << (offset - range->pin_base));
+       writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
+
+       return 0;
+}
+
+/* Pinmux callbacks handed to the pinctrl core. */
+static struct pinmux_ops sirfsoc_pinmux_ops = {
+       .list_functions = sirfsoc_pinmux_list_funcs,
+       .enable = sirfsoc_pinmux_enable,
+       .disable = sirfsoc_pinmux_disable,
+       .get_function_name = sirfsoc_pinmux_get_func_name,
+       .get_function_groups = sirfsoc_pinmux_get_groups,
+       .gpio_request_enable = sirfsoc_pinmux_request_gpio,
+};
+
+/* Top-level controller description registered with pinctrl_register(). */
+static struct pinctrl_desc sirfsoc_pinmux_desc = {
+       .name = DRIVER_NAME,
+       .pins = sirfsoc_pads,
+       .npins = ARRAY_SIZE(sirfsoc_pads),
+       .pctlops = &sirfsoc_pctrl_ops,
+       .pmxops = &sirfsoc_pinmux_ops,
+       .owner = THIS_MODULE,
+};
+
+/*
+ * Todo: bind irq_chip to every pinctrl_gpio_range
+ *
+ * Four GPIO banks of 32 pads each (the last bank exposes only 19);
+ * GPIO numbers and pad numbers are identical (base == pin_base).
+ */
+static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
+       {
+               .name = "sirfsoc-gpio*",
+               .id = 0,
+               .base = 0,
+               .pin_base = 0,
+               .npins = 32,
+       }, {
+               .name = "sirfsoc-gpio*",
+               .id = 1,
+               .base = 32,
+               .pin_base = 32,
+               .npins = 32,
+       }, {
+               .name = "sirfsoc-gpio*",
+               .id = 2,
+               .base = 64,
+               .pin_base = 64,
+               .npins = 32,
+       }, {
+               .name = "sirfsoc-gpio*",
+               .id = 3,
+               .base = 96,
+               .pin_base = 96,
+               .npins = 19,
+       },
+};
+
+/*
+ * Locate the prima2 RSC node in the device tree and map its registers.
+ * Panics when the node is absent (the pinmux cannot work without it).
+ * Fixed: of_find_matching_node() returns the node with an elevated
+ * refcount, which was previously leaked; drop it with of_node_put()
+ * once the registers are mapped.
+ */
+static void __iomem *sirfsoc_rsc_of_iomap(void)
+{
+       const struct of_device_id rsc_ids[]  = {
+               { .compatible = "sirf,prima2-rsc" },
+               {}
+       };
+       struct device_node *np;
+       void __iomem *base;
+
+       np = of_find_matching_node(NULL, rsc_ids);
+       if (!np)
+               panic("unable to find compatible rsc node in dtb\n");
+
+       base = of_iomap(np, 0);
+       of_node_put(np);
+
+       return base;
+}
+
+/*
+ * Probe: allocate driver state, map the GPIO pad-enable and RSC register
+ * banks, register the pin controller, and attach the four GPIO ranges.
+ * The error path unwinds in strict reverse order of acquisition.
+ */
+static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct sirfsoc_pmx *spmx;
+       struct device_node *np = pdev->dev.of_node;
+       int i;
+
+       /* Create state holders etc for this driver */
+       spmx = devm_kzalloc(&pdev->dev, sizeof(*spmx), GFP_KERNEL);
+       if (!spmx)
+               return -ENOMEM;
+
+       spmx->dev = &pdev->dev;
+
+       platform_set_drvdata(pdev, spmx);
+
+       spmx->gpio_virtbase = of_iomap(np, 0);
+       if (!spmx->gpio_virtbase) {
+               ret = -ENOMEM;
+               dev_err(&pdev->dev, "can't map gpio registers\n");
+               goto out_no_gpio_remap;
+       }
+
+       spmx->rsc_virtbase = sirfsoc_rsc_of_iomap();
+       if (!spmx->rsc_virtbase) {
+               ret = -ENOMEM;
+               dev_err(&pdev->dev, "can't map rsc registers\n");
+               goto out_no_rsc_remap;
+       }
+
+       /* Now register the pin controller and all pins it handles */
+       spmx->pmx = pinctrl_register(&sirfsoc_pinmux_desc, &pdev->dev, spmx);
+       if (!spmx->pmx) {
+               dev_err(&pdev->dev, "could not register SIRFSOC pinmux driver\n");
+               ret = -EINVAL;
+               goto out_no_pmx;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++)
+               pinctrl_add_gpio_range(spmx->pmx, &sirfsoc_gpio_ranges[i]);
+
+       dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n");
+
+       return 0;
+
+out_no_pmx:
+       iounmap(spmx->rsc_virtbase);
+out_no_rsc_remap:
+       iounmap(spmx->gpio_virtbase);
+out_no_gpio_remap:
+       platform_set_drvdata(pdev, NULL);
+       devm_kfree(&pdev->dev, spmx);
+       return ret;
+}
+
+/* Device-tree match table for this driver. */
+static const struct of_device_id pinmux_ids[]  = {
+       { .compatible = "sirf,prima2-gpio-pinmux" },
+       {}
+};
+
+/* Platform driver glue; probed via the OF match table above. */
+static struct platform_driver sirfsoc_pinmux_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = pinmux_ids,
+       },
+       .probe = sirfsoc_pinmux_probe,
+};
+
+/* Register early (arch_initcall) so dependent drivers find pins muxed. */
+static int __init sirfsoc_pinmux_init(void)
+{
+       return platform_driver_register(&sirfsoc_pinmux_driver);
+}
+arch_initcall(sirfsoc_pinmux_init);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+       "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
new file mode 100644 (file)
index 0000000..c8d02f1
--- /dev/null
@@ -0,0 +1,1156 @@
+/*
+ * Driver for the U300 pin controller
+ *
+ * Based on the original U300 padmux functions
+ * Copyright (C) 2009-2011 ST-Ericsson AB
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * The DB3350 design and control registers are oriented around pads rather than
+ * pins, so we enumerate the pads we can mux rather than actual pins. The pads
+ * are connected to different pins in different packaging types, so it would
+ * be confusing.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+/*
+ * Register definitions for the U300 Padmux control registers in the
+ * system controller
+ */
+
+/* PAD MUX Control register 1 (LOW) 16bit (R/W) */
+#define U300_SYSCON_PMC1LR                                     0x007C
+#define U300_SYSCON_PMC1LR_MASK                                        0xFFFF
+#define U300_SYSCON_PMC1LR_CDI_MASK                            0xC000
+#define U300_SYSCON_PMC1LR_CDI_CDI                             0x0000
+#define U300_SYSCON_PMC1LR_CDI_EMIF                            0x4000
+/* For BS335 */
+#define U300_SYSCON_PMC1LR_CDI_CDI2                            0x8000
+#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO                  0xC000
+/* For BS365 */
+#define U300_SYSCON_PMC1LR_CDI_GPIO                            0x8000
+#define U300_SYSCON_PMC1LR_CDI_WCDMA                           0xC000
+/* Common defs */
+#define U300_SYSCON_PMC1LR_PDI_MASK                            0x3000
+#define U300_SYSCON_PMC1LR_PDI_PDI                             0x0000
+#define U300_SYSCON_PMC1LR_PDI_EGG                             0x1000
+#define U300_SYSCON_PMC1LR_PDI_WCDMA                           0x3000
+#define U300_SYSCON_PMC1LR_MMCSD_MASK                          0x0C00
+#define U300_SYSCON_PMC1LR_MMCSD_MMCSD                         0x0000
+#define U300_SYSCON_PMC1LR_MMCSD_MSPRO                         0x0400
+#define U300_SYSCON_PMC1LR_MMCSD_DSP                           0x0800
+#define U300_SYSCON_PMC1LR_MMCSD_WCDMA                         0x0C00
+#define U300_SYSCON_PMC1LR_ETM_MASK                            0x0300
+#define U300_SYSCON_PMC1LR_ETM_ACC                             0x0000
+#define U300_SYSCON_PMC1LR_ETM_APP                             0x0100
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK                     0x00C0
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC                   0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF                     0x0040
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM                    0x0080
+#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB               0x00C0
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK                     0x0030
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC                   0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF                     0x0010
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM                    0x0020
+#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI                     0x0030
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK                     0x000C
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC                   0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF                     0x0004
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM                    0x0008
+#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI                     0x000C
+#define U300_SYSCON_PMC1LR_EMIF_1_MASK                         0x0003
+#define U300_SYSCON_PMC1LR_EMIF_1_STATIC                       0x0000
+#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0                       0x0001
+#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1                       0x0002
+#define U300_SYSCON_PMC1LR_EMIF_1                              0x0003
+/* PAD MUX Control register 2 (HIGH) 16bit (R/W) */
+#define U300_SYSCON_PMC1HR                                     0x007E
+#define U300_SYSCON_PMC1HR_MASK                                        0xFFFF
+#define U300_SYSCON_PMC1HR_MISC_2_MASK                         0xC000
+#define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO                     0x0000
+#define U300_SYSCON_PMC1HR_MISC_2_MSPRO                                0x4000
+#define U300_SYSCON_PMC1HR_MISC_2_DSP                          0x8000
+#define U300_SYSCON_PMC1HR_MISC_2_AAIF                         0xC000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK                     0x3000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO                 0x0000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF                     0x1000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP                      0x2000
+#define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF                     0x3000
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK                     0x0C00
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO                 0x0000
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC                      0x0400
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP                      0x0800
+#define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF                     0x0C00
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK                   0x0300
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO               0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI                    0x0100
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF                   0x0300
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK                   0x00C0
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO               0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI                    0x0040
+#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF                   0x00C0
+#define U300_SYSCON_PMC1HR_APP_SPI_2_MASK                      0x0030
+#define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC1HR_APP_SPI_2_SPI                       0x0010
+#define U300_SYSCON_PMC1HR_APP_SPI_2_DSP                       0x0020
+#define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF                      0x0030
+#define U300_SYSCON_PMC1HR_APP_UART0_2_MASK                    0x000C
+#define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO                        0x0000
+#define U300_SYSCON_PMC1HR_APP_UART0_2_UART0                   0x0004
+#define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS                 0x0008
+#define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF                    0x000C
+#define U300_SYSCON_PMC1HR_APP_UART0_1_MASK                    0x0003
+#define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO                        0x0000
+#define U300_SYSCON_PMC1HR_APP_UART0_1_UART0                   0x0001
+#define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF                    0x0003
+/* Padmux 2 control */
+#define U300_SYSCON_PMC2R                                      0x100
+#define U300_SYSCON_PMC2R_APP_MISC_0_MASK                      0x00C0
+#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM                        0x0040
+#define U300_SYSCON_PMC2R_APP_MISC_0_MMC                       0x0080
+#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2                      0x00C0
+#define U300_SYSCON_PMC2R_APP_MISC_1_MASK                      0x0300
+#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM                        0x0100
+#define U300_SYSCON_PMC2R_APP_MISC_1_MMC                       0x0200
+#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2                      0x0300
+#define U300_SYSCON_PMC2R_APP_MISC_2_MASK                      0x0C00
+#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM                        0x0400
+#define U300_SYSCON_PMC2R_APP_MISC_2_MMC                       0x0800
+#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2                      0x0C00
+#define U300_SYSCON_PMC2R_APP_MISC_3_MASK                      0x3000
+#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM                        0x1000
+#define U300_SYSCON_PMC2R_APP_MISC_3_MMC                       0x2000
+#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2                      0x3000
+#define U300_SYSCON_PMC2R_APP_MISC_4_MASK                      0xC000
+#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO                  0x0000
+#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM                        0x4000
+#define U300_SYSCON_PMC2R_APP_MISC_4_MMC                       0x8000
+#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO                  0xC000
+/* TODO: More SYSCON registers missing */
+#define U300_SYSCON_PMC3R                                      0x10C
+#define U300_SYSCON_PMC3R_APP_MISC_11_MASK                     0xC000
+#define U300_SYSCON_PMC3R_APP_MISC_11_SPI                      0x4000
+#define U300_SYSCON_PMC3R_APP_MISC_10_MASK                     0x3000
+#define U300_SYSCON_PMC3R_APP_MISC_10_SPI                      0x1000
+/* TODO: Missing other configs */
+#define U300_SYSCON_PMC4R                                      0x168
+#define U300_SYSCON_PMC4R_APP_MISC_12_MASK                     0x0003
+#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO                 0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_13_MASK                     0x000C
+#define U300_SYSCON_PMC4R_APP_MISC_13_CDI                      0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA                     0x0004
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2                    0x0008
+#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO                 0x000C
+#define U300_SYSCON_PMC4R_APP_MISC_14_MASK                     0x0030
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI                      0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA                     0x0010
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2                     0x0020
+#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO                 0x0030
+#define U300_SYSCON_PMC4R_APP_MISC_16_MASK                     0x0300
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13              0x0000
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS            0x0100
+#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N      0x0200
+
+#define DRIVER_NAME "pinmux-u300"
+
+/*
+ * The DB3350 has 467 pads, I have enumerated the pads clockwise around the
+ * edges of the silicon, finger by finger. LTCORNER upper left is pad 0.
+ * Data taken from the PadRing chart, arranged like this:
+ *
+ *   0 ..... 104
+ * 466        105
+ *   .        .
+ *   .        .
+ * 358        224
+ *  357 .... 225
+ */
+#define U300_NUM_PADS 467
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc u300_pads[] = {
+       /* Pads along the top edge of the chip */
+       PINCTRL_PIN(0, "P PAD VDD 28"),
+       PINCTRL_PIN(1, "P PAD GND 28"),
+       PINCTRL_PIN(2, "PO SIM RST N"),
+       PINCTRL_PIN(3, "VSSIO 25"),
+       PINCTRL_PIN(4, "VSSA ADDA ESDSUB"),
+       PINCTRL_PIN(5, "PWR VSSCOMMON"),
+       PINCTRL_PIN(6, "PI ADC I1 POS"),
+       PINCTRL_PIN(7, "PI ADC I1 NEG"),
+       PINCTRL_PIN(8, "PWR VSSAD0"),
+       PINCTRL_PIN(9, "PWR VCCAD0"),
+       PINCTRL_PIN(10, "PI ADC Q1 NEG"),
+       PINCTRL_PIN(11, "PI ADC Q1 POS"),
+       PINCTRL_PIN(12, "PWR VDDAD"),
+       PINCTRL_PIN(13, "PWR GNDAD"),
+       PINCTRL_PIN(14, "PI ADC I2 POS"),
+       PINCTRL_PIN(15, "PI ADC I2 NEG"),
+       PINCTRL_PIN(16, "PWR VSSAD1"),
+       PINCTRL_PIN(17, "PWR VCCAD1"),
+       PINCTRL_PIN(18, "PI ADC Q2 NEG"),
+       PINCTRL_PIN(19, "PI ADC Q2 POS"),
+       PINCTRL_PIN(20, "VSSA ADDA ESDSUB"),
+       PINCTRL_PIN(21, "PWR VCCGPAD"),
+       PINCTRL_PIN(22, "PI TX POW"),
+       PINCTRL_PIN(23, "PWR VSSGPAD"),
+       PINCTRL_PIN(24, "PO DAC I POS"),
+       PINCTRL_PIN(25, "PO DAC I NEG"),
+       PINCTRL_PIN(26, "PO DAC Q POS"),
+       PINCTRL_PIN(27, "PO DAC Q NEG"),
+       PINCTRL_PIN(28, "PWR VSSDA"),
+       PINCTRL_PIN(29, "PWR VCCDA"),
+       PINCTRL_PIN(30, "VSSA ADDA ESDSUB"),
+       PINCTRL_PIN(31, "P PAD VDDIO 11"),
+       PINCTRL_PIN(32, "PI PLL 26 FILTVDD"),
+       PINCTRL_PIN(33, "PI PLL 26 VCONT"),
+       PINCTRL_PIN(34, "PWR AGNDPLL2V5 32 13"),
+       PINCTRL_PIN(35, "PWR AVDDPLL2V5 32 13"),
+       PINCTRL_PIN(36, "VDDA PLL ESD"),
+       PINCTRL_PIN(37, "VSSA PLL ESD"),
+       PINCTRL_PIN(38, "VSS PLL"),
+       PINCTRL_PIN(39, "VDDC PLL"),
+       PINCTRL_PIN(40, "PWR AGNDPLL2V5 26 60"),
+       PINCTRL_PIN(41, "PWR AVDDPLL2V5 26 60"),
+       PINCTRL_PIN(42, "PWR AVDDPLL2V5 26 208"),
+       PINCTRL_PIN(43, "PWR AGNDPLL2V5 26 208"),
+       PINCTRL_PIN(44, "PWR AVDDPLL2V5 13 208"),
+       PINCTRL_PIN(45, "PWR AGNDPLL2V5 13 208"),
+       PINCTRL_PIN(46, "P PAD VSSIO 11"),
+       PINCTRL_PIN(47, "P PAD VSSIO 12"),
+       PINCTRL_PIN(48, "PI POW RST N"),
+       PINCTRL_PIN(49, "VDDC IO"),
+       PINCTRL_PIN(50, "P PAD VDDIO 16"),
+       PINCTRL_PIN(51, "PO RF WCDMA EN 4"),
+       PINCTRL_PIN(52, "PO RF WCDMA EN 3"),
+       PINCTRL_PIN(53, "PO RF WCDMA EN 2"),
+       PINCTRL_PIN(54, "PO RF WCDMA EN 1"),
+       PINCTRL_PIN(55, "PO RF WCDMA EN 0"),
+       PINCTRL_PIN(56, "PO GSM PA ENABLE"),
+       PINCTRL_PIN(57, "PO RF DATA STRB"),
+       PINCTRL_PIN(58, "PO RF DATA2"),
+       PINCTRL_PIN(59, "PIO RF DATA1"),
+       PINCTRL_PIN(60, "PIO RF DATA0"),
+       PINCTRL_PIN(61, "P PAD VDD 11"),
+       PINCTRL_PIN(62, "P PAD GND 11"),
+       PINCTRL_PIN(63, "P PAD VSSIO 16"),
+       PINCTRL_PIN(64, "P PAD VDDIO 18"),
+       PINCTRL_PIN(65, "PO RF CTRL STRB2"),
+       PINCTRL_PIN(66, "PO RF CTRL STRB1"),
+       PINCTRL_PIN(67, "PO RF CTRL STRB0"),
+       PINCTRL_PIN(68, "PIO RF CTRL DATA"),
+       PINCTRL_PIN(69, "PO RF CTRL CLK"),
+       PINCTRL_PIN(70, "PO TX ADC STRB"),
+       PINCTRL_PIN(71, "PO ANT SW 2"),
+       PINCTRL_PIN(72, "PO ANT SW 3"),
+       PINCTRL_PIN(73, "PO ANT SW 0"),
+       PINCTRL_PIN(74, "PO ANT SW 1"),
+       PINCTRL_PIN(75, "PO M CLKRQ"),
+       PINCTRL_PIN(76, "PI M CLK"),
+       PINCTRL_PIN(77, "PI RTC CLK"),
+       PINCTRL_PIN(78, "P PAD VDD 8"),
+       PINCTRL_PIN(79, "P PAD GND 8"),
+       PINCTRL_PIN(80, "P PAD VSSIO 13"),
+       PINCTRL_PIN(81, "P PAD VDDIO 13"),
+       PINCTRL_PIN(82, "PO SYS 1 CLK"),
+       PINCTRL_PIN(83, "PO SYS 2 CLK"),
+       PINCTRL_PIN(84, "PO SYS 0 CLK"),
+       PINCTRL_PIN(85, "PI SYS 0 CLKRQ"),
+       PINCTRL_PIN(86, "PO PWR MNGT CTRL 1"),
+       PINCTRL_PIN(87, "PO PWR MNGT CTRL 0"),
+       PINCTRL_PIN(88, "PO RESOUT2 RST N"),
+       PINCTRL_PIN(89, "PO RESOUT1 RST N"),
+       PINCTRL_PIN(90, "PO RESOUT0 RST N"),
+       PINCTRL_PIN(91, "PI SERVICE N"),
+       PINCTRL_PIN(92, "P PAD VDD 29"),
+       PINCTRL_PIN(93, "P PAD GND 29"),
+       PINCTRL_PIN(94, "P PAD VSSIO 8"),
+       PINCTRL_PIN(95, "P PAD VDDIO 8"),
+       PINCTRL_PIN(96, "PI EXT IRQ1 N"),
+       PINCTRL_PIN(97, "PI EXT IRQ0 N"),
+       PINCTRL_PIN(98, "PIO DC ON"),
+       PINCTRL_PIN(99, "PIO ACC APP I2C DATA"),
+       PINCTRL_PIN(100, "PIO ACC APP I2C CLK"),
+       PINCTRL_PIN(101, "P PAD VDD 12"),
+       PINCTRL_PIN(102, "P PAD GND 12"),
+       PINCTRL_PIN(103, "P PAD VSSIO 14"),
+       PINCTRL_PIN(104, "P PAD VDDIO 14"),
+       /* Pads along the right edge of the chip */
+       PINCTRL_PIN(105, "PIO APP I2C1 DATA"),
+       PINCTRL_PIN(106, "PIO APP I2C1 CLK"),
+       PINCTRL_PIN(107, "PO KEY OUT0"),
+       PINCTRL_PIN(108, "PO KEY OUT1"),
+       PINCTRL_PIN(109, "PO KEY OUT2"),
+       PINCTRL_PIN(110, "PO KEY OUT3"),
+       PINCTRL_PIN(111, "PO KEY OUT4"),
+       PINCTRL_PIN(112, "PI KEY IN0"),
+       PINCTRL_PIN(113, "PI KEY IN1"),
+       PINCTRL_PIN(114, "PI KEY IN2"),
+       PINCTRL_PIN(115, "P PAD VDDIO 15"),
+       PINCTRL_PIN(116, "P PAD VSSIO 15"),
+       PINCTRL_PIN(117, "P PAD GND 13"),
+       PINCTRL_PIN(118, "P PAD VDD 13"),
+       PINCTRL_PIN(119, "PI KEY IN3"),
+       PINCTRL_PIN(120, "PI KEY IN4"),
+       PINCTRL_PIN(121, "PI KEY IN5"),
+       PINCTRL_PIN(122, "PIO APP PCM I2S1 DATA B"),
+       PINCTRL_PIN(123, "PIO APP PCM I2S1 DATA A"),
+       PINCTRL_PIN(124, "PIO APP PCM I2S1 WS"),
+       PINCTRL_PIN(125, "PIO APP PCM I2S1 CLK"),
+       PINCTRL_PIN(126, "PIO APP PCM I2S0 DATA B"),
+       PINCTRL_PIN(127, "PIO APP PCM I2S0 DATA A"),
+       PINCTRL_PIN(128, "PIO APP PCM I2S0 WS"),
+       PINCTRL_PIN(129, "PIO APP PCM I2S0 CLK"),
+       PINCTRL_PIN(130, "P PAD VDD 17"),
+       PINCTRL_PIN(131, "P PAD GND 17"),
+       PINCTRL_PIN(132, "P PAD VSSIO 19"),
+       PINCTRL_PIN(133, "P PAD VDDIO 19"),
+       PINCTRL_PIN(134, "UART0 RTS"),
+       PINCTRL_PIN(135, "UART0 CTS"),
+       PINCTRL_PIN(136, "UART0 TX"),
+       PINCTRL_PIN(137, "UART0 RX"),
+       PINCTRL_PIN(138, "PIO ACC SPI DO"),
+       PINCTRL_PIN(139, "PIO ACC SPI DI"),
+       PINCTRL_PIN(140, "PIO ACC SPI CS0 N"),
+       PINCTRL_PIN(141, "PIO ACC SPI CS1 N"),
+       PINCTRL_PIN(142, "PIO ACC SPI CS2 N"),
+       PINCTRL_PIN(143, "PIO ACC SPI CLK"),
+       PINCTRL_PIN(144, "PO PDI EXT RST N"),
+       PINCTRL_PIN(145, "P PAD VDDIO 22"),
+       PINCTRL_PIN(146, "P PAD VSSIO 22"),
+       PINCTRL_PIN(147, "P PAD GND 18"),
+       PINCTRL_PIN(148, "P PAD VDD 18"),
+       PINCTRL_PIN(149, "PIO PDI C0"),
+       PINCTRL_PIN(150, "PIO PDI C1"),
+       PINCTRL_PIN(151, "PIO PDI C2"),
+       PINCTRL_PIN(152, "PIO PDI C3"),
+       PINCTRL_PIN(153, "PIO PDI C4"),
+       PINCTRL_PIN(154, "PIO PDI C5"),
+       PINCTRL_PIN(155, "PIO PDI D0"),
+       PINCTRL_PIN(156, "PIO PDI D1"),
+       PINCTRL_PIN(157, "PIO PDI D2"),
+       PINCTRL_PIN(158, "PIO PDI D3"),
+       PINCTRL_PIN(159, "P PAD VDDIO 21"),
+       PINCTRL_PIN(160, "P PAD VSSIO 21"),
+       PINCTRL_PIN(161, "PIO PDI D4"),
+       PINCTRL_PIN(162, "PIO PDI D5"),
+       PINCTRL_PIN(163, "PIO PDI D6"),
+       PINCTRL_PIN(164, "PIO PDI D7"),
+       PINCTRL_PIN(165, "PIO MS INS"),
+       PINCTRL_PIN(166, "MMC DATA DIR LS"),
+       PINCTRL_PIN(167, "MMC DATA 3"),
+       PINCTRL_PIN(168, "MMC DATA 2"),
+       PINCTRL_PIN(169, "MMC DATA 1"),
+       PINCTRL_PIN(170, "MMC DATA 0"),
+       PINCTRL_PIN(171, "MMC CMD DIR LS"),
+       PINCTRL_PIN(172, "P PAD VDD 27"),
+       PINCTRL_PIN(173, "P PAD GND 27"),
+       PINCTRL_PIN(174, "P PAD VSSIO 20"),
+       PINCTRL_PIN(175, "P PAD VDDIO 20"),
+       PINCTRL_PIN(176, "MMC CMD"),
+       PINCTRL_PIN(177, "MMC CLK"),
+       PINCTRL_PIN(178, "PIO APP GPIO 14"),
+       PINCTRL_PIN(179, "PIO APP GPIO 13"),
+       PINCTRL_PIN(180, "PIO APP GPIO 11"),
+       PINCTRL_PIN(181, "PIO APP GPIO 25"),
+       PINCTRL_PIN(182, "PIO APP GPIO 24"),
+       PINCTRL_PIN(183, "PIO APP GPIO 23"),
+       PINCTRL_PIN(184, "PIO APP GPIO 22"),
+       PINCTRL_PIN(185, "PIO APP GPIO 21"),
+       PINCTRL_PIN(186, "PIO APP GPIO 20"),
+       PINCTRL_PIN(187, "P PAD VDD 19"),
+       PINCTRL_PIN(188, "P PAD GND 19"),
+       PINCTRL_PIN(189, "P PAD VSSIO 23"),
+       PINCTRL_PIN(190, "P PAD VDDIO 23"),
+       PINCTRL_PIN(191, "PIO APP GPIO 19"),
+       PINCTRL_PIN(192, "PIO APP GPIO 18"),
+       PINCTRL_PIN(193, "PIO APP GPIO 17"),
+       PINCTRL_PIN(194, "PIO APP GPIO 16"),
+       PINCTRL_PIN(195, "PI CI D1"),
+       PINCTRL_PIN(196, "PI CI D0"),
+       PINCTRL_PIN(197, "PI CI HSYNC"),
+       PINCTRL_PIN(198, "PI CI VSYNC"),
+       PINCTRL_PIN(199, "PI CI EXT CLK"),
+       PINCTRL_PIN(200, "PO CI EXT RST N"),
+       PINCTRL_PIN(201, "P PAD VSSIO 43"),
+       PINCTRL_PIN(202, "P PAD VDDIO 43"),
+       PINCTRL_PIN(203, "PI CI D6"),
+       PINCTRL_PIN(204, "PI CI D7"),
+       PINCTRL_PIN(205, "PI CI D2"),
+       PINCTRL_PIN(206, "PI CI D3"),
+       PINCTRL_PIN(207, "PI CI D4"),
+       PINCTRL_PIN(208, "PI CI D5"),
+       PINCTRL_PIN(209, "PI CI D8"),
+       PINCTRL_PIN(210, "PI CI D9"),
+       PINCTRL_PIN(211, "P PAD VDD 20"),
+       PINCTRL_PIN(212, "P PAD GND 20"),
+       PINCTRL_PIN(213, "P PAD VSSIO 24"),
+       PINCTRL_PIN(214, "P PAD VDDIO 24"),
+       PINCTRL_PIN(215, "P PAD VDDIO 26"),
+       PINCTRL_PIN(216, "PO EMIF 1 A26"),
+       PINCTRL_PIN(217, "PO EMIF 1 A25"),
+       PINCTRL_PIN(218, "P PAD VSSIO 26"),
+       PINCTRL_PIN(219, "PO EMIF 1 A24"),
+       PINCTRL_PIN(220, "PO EMIF 1 A23"),
+       /* Pads along the bottom edge of the chip */
+       PINCTRL_PIN(221, "PO EMIF 1 A22"),
+       PINCTRL_PIN(222, "PO EMIF 1 A21"),
+       PINCTRL_PIN(223, "P PAD VDD 21"),
+       PINCTRL_PIN(224, "P PAD GND 21"),
+       PINCTRL_PIN(225, "P PAD VSSIO 27"),
+       PINCTRL_PIN(226, "P PAD VDDIO 27"),
+       PINCTRL_PIN(227, "PO EMIF 1 A20"),
+       PINCTRL_PIN(228, "PO EMIF 1 A19"),
+       PINCTRL_PIN(229, "PO EMIF 1 A18"),
+       PINCTRL_PIN(230, "PO EMIF 1 A17"),
+       PINCTRL_PIN(231, "P PAD VDDIO 28"),
+       PINCTRL_PIN(232, "P PAD VSSIO 28"),
+       PINCTRL_PIN(233, "PO EMIF 1 A16"),
+       PINCTRL_PIN(234, "PIO EMIF 1 D15"),
+       PINCTRL_PIN(235, "PO EMIF 1 A15"),
+       PINCTRL_PIN(236, "PIO EMIF 1 D14"),
+       PINCTRL_PIN(237, "P PAD VDD 22"),
+       PINCTRL_PIN(238, "P PAD GND 22"),
+       PINCTRL_PIN(239, "P PAD VSSIO 29"),
+       PINCTRL_PIN(240, "P PAD VDDIO 29"),
+       PINCTRL_PIN(241, "PO EMIF 1 A14"),
+       PINCTRL_PIN(242, "PIO EMIF 1 D13"),
+       PINCTRL_PIN(243, "PO EMIF 1 A13"),
+       PINCTRL_PIN(244, "PIO EMIF 1 D12"),
+       PINCTRL_PIN(245, "P PAD VSSIO 30"),
+       PINCTRL_PIN(246, "P PAD VDDIO 30"),
+       PINCTRL_PIN(247, "PO EMIF 1 A12"),
+       PINCTRL_PIN(248, "PIO EMIF 1 D11"),
+       PINCTRL_PIN(249, "PO EMIF 1 A11"),
+       PINCTRL_PIN(250, "PIO EMIF 1 D10"),
+       PINCTRL_PIN(251, "P PAD VSSIO 31"),
+       PINCTRL_PIN(252, "P PAD VDDIO 31"),
+       PINCTRL_PIN(253, "PO EMIF 1 A10"),
+       PINCTRL_PIN(254, "PIO EMIF 1 D09"),
+       PINCTRL_PIN(255, "PO EMIF 1 A09"),
+       PINCTRL_PIN(256, "P PAD VDDIO 32"),
+       PINCTRL_PIN(257, "P PAD VSSIO 32"),
+       PINCTRL_PIN(258, "P PAD GND 24"),
+       PINCTRL_PIN(259, "P PAD VDD 24"),
+       PINCTRL_PIN(260, "PIO EMIF 1 D08"),
+       PINCTRL_PIN(261, "PO EMIF 1 A08"),
+       PINCTRL_PIN(262, "PIO EMIF 1 D07"),
+       PINCTRL_PIN(263, "PO EMIF 1 A07"),
+       PINCTRL_PIN(264, "P PAD VDDIO 33"),
+       PINCTRL_PIN(265, "P PAD VSSIO 33"),
+       PINCTRL_PIN(266, "PIO EMIF 1 D06"),
+       PINCTRL_PIN(267, "PO EMIF 1 A06"),
+       PINCTRL_PIN(268, "PIO EMIF 1 D05"),
+       PINCTRL_PIN(269, "PO EMIF 1 A05"),
+       PINCTRL_PIN(270, "P PAD VDDIO 34"),
+       PINCTRL_PIN(271, "P PAD VSSIO 34"),
+       PINCTRL_PIN(272, "PIO EMIF 1 D04"),
+       PINCTRL_PIN(273, "PO EMIF 1 A04"),
+       PINCTRL_PIN(274, "PIO EMIF 1 D03"),
+       PINCTRL_PIN(275, "PO EMIF 1 A03"),
+       PINCTRL_PIN(276, "P PAD VDDIO 35"),
+       PINCTRL_PIN(277, "P PAD VSSIO 35"),
+       PINCTRL_PIN(278, "P PAD GND 23"),
+       PINCTRL_PIN(279, "P PAD VDD 23"),
+       PINCTRL_PIN(280, "PIO EMIF 1 D02"),
+       PINCTRL_PIN(281, "PO EMIF 1 A02"),
+       PINCTRL_PIN(282, "PIO EMIF 1 D01"),
+       PINCTRL_PIN(283, "PO EMIF 1 A01"),
+       PINCTRL_PIN(284, "P PAD VDDIO 36"),
+       PINCTRL_PIN(285, "P PAD VSSIO 36"),
+       PINCTRL_PIN(286, "PIO EMIF 1 D00"),
+       PINCTRL_PIN(287, "PO EMIF 1 BE1 N"),
+       PINCTRL_PIN(288, "PO EMIF 1 BE0 N"),
+       PINCTRL_PIN(289, "PO EMIF 1 ADV N"),
+       PINCTRL_PIN(290, "P PAD VDDIO 37"),
+       PINCTRL_PIN(291, "P PAD VSSIO 37"),
+       PINCTRL_PIN(292, "PO EMIF 1 SD CKE0"),
+       PINCTRL_PIN(293, "PO EMIF 1 OE N"),
+       PINCTRL_PIN(294, "PO EMIF 1 WE N"),
+       PINCTRL_PIN(295, "P PAD VDDIO 38"),
+       PINCTRL_PIN(296, "P PAD VSSIO 38"),
+       PINCTRL_PIN(297, "PO EMIF 1 CLK"),
+       PINCTRL_PIN(298, "PIO EMIF 1 SD CLK"),
+       PINCTRL_PIN(299, "P PAD VSSIO 45 (not bonded)"),
+       PINCTRL_PIN(300, "P PAD VDDIO 42"),
+       PINCTRL_PIN(301, "P PAD VSSIO 42"),
+       PINCTRL_PIN(302, "P PAD GND 31"),
+       PINCTRL_PIN(303, "P PAD VDD 31"),
+       PINCTRL_PIN(304, "PI EMIF 1 RET CLK"),
+       PINCTRL_PIN(305, "PI EMIF 1 WAIT N"),
+       PINCTRL_PIN(306, "PI EMIF 1 NFIF READY"),
+       PINCTRL_PIN(307, "PO EMIF 1 SD CKE1"),
+       PINCTRL_PIN(308, "PO EMIF 1 CS3 N"),
+       PINCTRL_PIN(309, "P PAD VDD 25"),
+       PINCTRL_PIN(310, "P PAD GND 25"),
+       PINCTRL_PIN(311, "P PAD VSSIO 39"),
+       PINCTRL_PIN(312, "P PAD VDDIO 39"),
+       PINCTRL_PIN(313, "PO EMIF 1 CS2 N"),
+       PINCTRL_PIN(314, "PO EMIF 1 CS1 N"),
+       PINCTRL_PIN(315, "PO EMIF 1 CS0 N"),
+       PINCTRL_PIN(316, "PO ETM TRACE PKT0"),
+       PINCTRL_PIN(317, "PO ETM TRACE PKT1"),
+       PINCTRL_PIN(318, "PO ETM TRACE PKT2"),
+       PINCTRL_PIN(319, "P PAD VDD 30"),
+       PINCTRL_PIN(320, "P PAD GND 30"),
+       PINCTRL_PIN(321, "P PAD VSSIO 44"),
+       PINCTRL_PIN(322, "P PAD VDDIO 44"),
+       PINCTRL_PIN(323, "PO ETM TRACE PKT3"),
+       PINCTRL_PIN(324, "PO ETM TRACE PKT4"),
+       PINCTRL_PIN(325, "PO ETM TRACE PKT5"),
+       PINCTRL_PIN(326, "PO ETM TRACE PKT6"),
+       PINCTRL_PIN(327, "PO ETM TRACE PKT7"),
+       PINCTRL_PIN(328, "PO ETM PIPE STAT0"),
+       PINCTRL_PIN(329, "P PAD VDD 26"),
+       PINCTRL_PIN(330, "P PAD GND 26"),
+       PINCTRL_PIN(331, "P PAD VSSIO 40"),
+       PINCTRL_PIN(332, "P PAD VDDIO 40"),
+       PINCTRL_PIN(333, "PO ETM PIPE STAT1"),
+       PINCTRL_PIN(334, "PO ETM PIPE STAT2"),
+       PINCTRL_PIN(335, "PO ETM TRACE CLK"),
+       PINCTRL_PIN(336, "PO ETM TRACE SYNC"),
+       PINCTRL_PIN(337, "PIO ACC GPIO 33"),
+       PINCTRL_PIN(338, "PIO ACC GPIO 32"),
+       PINCTRL_PIN(339, "PIO ACC GPIO 30"),
+       PINCTRL_PIN(340, "PIO ACC GPIO 29"),
+       PINCTRL_PIN(341, "P PAD VDDIO 17"),
+       PINCTRL_PIN(342, "P PAD VSSIO 17"),
+       PINCTRL_PIN(343, "P PAD GND 15"),
+       PINCTRL_PIN(344, "P PAD VDD 15"),
+       PINCTRL_PIN(345, "PIO ACC GPIO 28"),
+       PINCTRL_PIN(346, "PIO ACC GPIO 27"),
+       PINCTRL_PIN(347, "PIO ACC GPIO 16"),
+       PINCTRL_PIN(348, "PI TAP TMS"),
+       PINCTRL_PIN(349, "PI TAP TDI"),
+       PINCTRL_PIN(350, "PO TAP TDO"),
+       PINCTRL_PIN(351, "PI TAP RST N"),
+       /* Pads along the left edge of the chip */
+       PINCTRL_PIN(352, "PI EMU MODE 0"),
+       PINCTRL_PIN(353, "PO TAP RET CLK"),
+       PINCTRL_PIN(354, "PI TAP CLK"),
+       PINCTRL_PIN(355, "PO EMIF 0 SD CS N"),
+       PINCTRL_PIN(356, "PO EMIF 0 SD CAS N"),
+       PINCTRL_PIN(357, "PO EMIF 0 SD WE N"),
+       PINCTRL_PIN(358, "P PAD VDDIO 1"),
+       PINCTRL_PIN(359, "P PAD VSSIO 1"),
+       PINCTRL_PIN(360, "P PAD GND 1"),
+       PINCTRL_PIN(361, "P PAD VDD 1"),
+       PINCTRL_PIN(362, "PO EMIF 0 SD CKE"),
+       PINCTRL_PIN(363, "PO EMIF 0 SD DQML"),
+       PINCTRL_PIN(364, "PO EMIF 0 SD DQMU"),
+       PINCTRL_PIN(365, "PO EMIF 0 SD RAS N"),
+       PINCTRL_PIN(366, "PIO EMIF 0 D15"),
+       PINCTRL_PIN(367, "PO EMIF 0 A15"),
+       PINCTRL_PIN(368, "PIO EMIF 0 D14"),
+       PINCTRL_PIN(369, "PO EMIF 0 A14"),
+       PINCTRL_PIN(370, "PIO EMIF 0 D13"),
+       PINCTRL_PIN(371, "PO EMIF 0 A13"),
+       PINCTRL_PIN(372, "P PAD VDDIO 2"),
+       PINCTRL_PIN(373, "P PAD VSSIO 2"),
+       PINCTRL_PIN(374, "P PAD GND 2"),
+       PINCTRL_PIN(375, "P PAD VDD 2"),
+       PINCTRL_PIN(376, "PIO EMIF 0 D12"),
+       PINCTRL_PIN(377, "PO EMIF 0 A12"),
+       PINCTRL_PIN(378, "PIO EMIF 0 D11"),
+       PINCTRL_PIN(379, "PO EMIF 0 A11"),
+       PINCTRL_PIN(380, "PIO EMIF 0 D10"),
+       PINCTRL_PIN(381, "PO EMIF 0 A10"),
+       PINCTRL_PIN(382, "PIO EMIF 0 D09"),
+       PINCTRL_PIN(383, "PO EMIF 0 A09"),
+       PINCTRL_PIN(384, "PIO EMIF 0 D08"),
+       PINCTRL_PIN(385, "PO EMIF 0 A08"),
+       PINCTRL_PIN(386, "PIO EMIF 0 D07"),
+       PINCTRL_PIN(387, "PO EMIF 0 A07"),
+       PINCTRL_PIN(388, "P PAD VDDIO 3"),
+       PINCTRL_PIN(389, "P PAD VSSIO 3"),
+       PINCTRL_PIN(390, "P PAD GND 3"),
+       PINCTRL_PIN(391, "P PAD VDD 3"),
+       PINCTRL_PIN(392, "PO EFUSE RDOUT1"),
+       PINCTRL_PIN(393, "PIO EMIF 0 D06"),
+       PINCTRL_PIN(394, "PO EMIF 0 A06"),
+       PINCTRL_PIN(395, "PIO EMIF 0 D05"),
+       PINCTRL_PIN(396, "PO EMIF 0 A05"),
+       PINCTRL_PIN(397, "PIO EMIF 0 D04"),
+       PINCTRL_PIN(398, "PO EMIF 0 A04"),
+       PINCTRL_PIN(399, "A PADS/A VDDCO1v82v5 GND 80U SF LIN VDDCO AF"),
+       PINCTRL_PIN(400, "PWR VDDCO AF"),
+       PINCTRL_PIN(401, "PWR EFUSE HV1"),
+       PINCTRL_PIN(402, "P PAD VSSIO 4"),
+       PINCTRL_PIN(403, "P PAD VDDIO 4"),
+       PINCTRL_PIN(404, "P PAD GND 4"),
+       PINCTRL_PIN(405, "P PAD VDD 4"),
+       PINCTRL_PIN(406, "PIO EMIF 0 D03"),
+       PINCTRL_PIN(407, "PO EMIF 0 A03"),
+       PINCTRL_PIN(408, "PWR EFUSE HV2"),
+       PINCTRL_PIN(409, "PWR EFUSE HV3"),
+       PINCTRL_PIN(410, "PIO EMIF 0 D02"),
+       PINCTRL_PIN(411, "PO EMIF 0 A02"),
+       PINCTRL_PIN(412, "PIO EMIF 0 D01"),
+       PINCTRL_PIN(413, "P PAD VDDIO 5"),
+       PINCTRL_PIN(414, "P PAD VSSIO 5"),
+       PINCTRL_PIN(415, "P PAD GND 5"),
+       PINCTRL_PIN(416, "P PAD VDD 5"),
+       PINCTRL_PIN(417, "PO EMIF 0 A01"),
+       PINCTRL_PIN(418, "PIO EMIF 0 D00"),
+       PINCTRL_PIN(419, "IF 0 SD CLK"),
+       PINCTRL_PIN(420, "APP SPI CLK"),
+       PINCTRL_PIN(421, "APP SPI DO"),
+       PINCTRL_PIN(422, "APP SPI DI"),
+       PINCTRL_PIN(423, "APP SPI CS0"),
+       PINCTRL_PIN(424, "APP SPI CS1"),
+       PINCTRL_PIN(425, "APP SPI CS2"),
+       PINCTRL_PIN(426, "PIO APP GPIO 10"),
+       PINCTRL_PIN(427, "P PAD VDDIO 41"),
+       PINCTRL_PIN(428, "P PAD VSSIO 41"),
+       PINCTRL_PIN(429, "P PAD GND 6"),
+       PINCTRL_PIN(430, "P PAD VDD 6"),
+       PINCTRL_PIN(431, "PIO ACC SDIO0 CMD"),
+       PINCTRL_PIN(432, "PIO ACC SDIO0 CK"),
+       PINCTRL_PIN(433, "PIO ACC SDIO0 D3"),
+       PINCTRL_PIN(434, "PIO ACC SDIO0 D2"),
+       PINCTRL_PIN(435, "PIO ACC SDIO0 D1"),
+       PINCTRL_PIN(436, "PIO ACC SDIO0 D0"),
+       PINCTRL_PIN(437, "PIO USB PU"),
+       PINCTRL_PIN(438, "PIO USB SP"),
+       PINCTRL_PIN(439, "PIO USB DAT VP"),
+       PINCTRL_PIN(440, "PIO USB SE0 VM"),
+       PINCTRL_PIN(441, "PIO USB OE"),
+       PINCTRL_PIN(442, "PIO USB SUSP"),
+       PINCTRL_PIN(443, "P PAD VSSIO 6"),
+       PINCTRL_PIN(444, "P PAD VDDIO 6"),
+       PINCTRL_PIN(445, "PIO USB PUEN"),
+       PINCTRL_PIN(446, "PIO ACC UART0 RX"),
+       PINCTRL_PIN(447, "PIO ACC UART0 TX"),
+       PINCTRL_PIN(448, "PIO ACC UART0 CTS"),
+       PINCTRL_PIN(449, "PIO ACC UART0 RTS"),
+       PINCTRL_PIN(450, "PIO ACC UART3 RX"),
+       PINCTRL_PIN(451, "PIO ACC UART3 TX"),
+       PINCTRL_PIN(452, "PIO ACC UART3 CTS"),
+       PINCTRL_PIN(453, "PIO ACC UART3 RTS"),
+       PINCTRL_PIN(454, "PIO ACC IRDA TX"),
+       PINCTRL_PIN(455, "P PAD VDDIO 7"),
+       PINCTRL_PIN(456, "P PAD VSSIO 7"),
+       PINCTRL_PIN(457, "P PAD GND 7"),
+       PINCTRL_PIN(458, "P PAD VDD 7"),
+       PINCTRL_PIN(459, "PIO ACC IRDA RX"),
+       PINCTRL_PIN(460, "PIO ACC PCM I2S CLK"),
+       PINCTRL_PIN(461, "PIO ACC PCM I2S WS"),
+       PINCTRL_PIN(462, "PIO ACC PCM I2S DATA A"),
+       PINCTRL_PIN(463, "PIO ACC PCM I2S DATA B"),
+       PINCTRL_PIN(464, "PO SIM CLK"),
+       PINCTRL_PIN(465, "PIO ACC IRDA SD"),
+       PINCTRL_PIN(466, "PIO SIM DATA"),
+};
+
+/**
+ * struct u300_pmx - state container for the U300 pinmux driver
+ * @dev: a pointer back to containing device
+ * @pctl: handle for the pin controller registered with the pinctrl
+ *     subsystem
+ * @phybase: physical base address of the control registers
+ *     (presumably taken from the platform mem resource — verify in probe)
+ * @physize: size of the control register window
+ * @virtbase: remapped (__iomem) virtual base address of the controller
+ *     registers — a base pointer, not an offset
+ */
+struct u300_pmx {
+       struct device *dev;
+       struct pinctrl_dev *pctl;
+       u32 phybase;
+       u32 physize;
+       void __iomem *virtbase;
+};
+
+/*
+ * u300_pmx_registers - the array of registers read/written for each
+ * pinmux shunt setting, ordered as the per-register mask tables
+ * (PMC1LR, PMC1HR, PMC2R, PMC3R, PMC4R).
+ *
+ * Made static: the table is file-local, and giving it external linkage
+ * pollutes the kernel namespace (sparse: "symbol was not declared.
+ * Should it be static?"). Also demoted the comment from kernel-doc
+ * "/**" to a plain comment since a file-scope static needs no
+ * kernel-doc entry.
+ */
+static const u32 u300_pmx_registers[] = {
+       U300_SYSCON_PMC1LR,
+       U300_SYSCON_PMC1HR,
+       U300_SYSCON_PMC2R,
+       U300_SYSCON_PMC3R,
+       U300_SYSCON_PMC4R,
+};
+
+/**
+ * struct u300_pin_group - describes a U300 pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ *     from the driver-local pin enumeration space
+ * @num_pins: the number of pins in this group array, i.e. the number of
+ *     elements in .pins so we can iterate over that array
+ *
+ * The numbers in @pins are indices into the u300_pads descriptor table
+ * (e.g. the emif0 group lists pads 355.. which are the "PO EMIF 0 ..."
+ * pads).
+ */
+struct u300_pin_group {
+       const char *name;
+       const unsigned int *pins;
+       const unsigned num_pins;
+};
+
+/**
+ * struct pmx_onmask - mask bits to enable/disable padmux
+ * @mask: mask bits to disable
+ * @val: mask bits to enable
+ *
+ * onmask lazy dog:
+ * onmask = {
+ *   {"PMC1LR" mask, "PMC1LR" value},
+ *   {"PMC1HR" mask, "PMC1HR" value},
+ *   {"PMC2R"  mask, "PMC2R"  value},
+ *   {"PMC3R"  mask, "PMC3R"  value},
+ *   {"PMC4R"  mask, "PMC4R"  value}
+ * }
+ */
+struct u300_pmx_mask {
+       u16 mask;
+       u16 bits;
+};
+
+/* The chip power pins are VDD, GND, VDDIO and VSSIO */
+static const unsigned power_pins[] = { 0, 1, 3, 31, 46, 47, 49, 50, 61, 62, 63,
+       64, 78, 79, 80, 81, 92, 93, 94, 95, 101, 102, 103, 104, 115, 116, 117,
+       118, 130, 131, 132, 133, 145, 146, 147, 148, 159, 160, 172, 173, 174,
+       175, 187, 188, 189, 190, 201, 202, 211, 212, 213, 214, 215, 218, 223,
+       224, 225, 226, 231, 232, 237, 238, 239, 240, 245, 246, 251, 252, 256,
+       257, 258, 259, 264, 265, 270, 271, 276, 277, 278, 279, 284, 285, 290,
+       291, 295, 296, 299, 300, 301, 302, 303, 309, 310, 311, 312, 319, 320,
+       321, 322, 329, 330, 331, 332, 341, 342, 343, 344, 358, 359, 360, 361,
+       372, 373, 374, 375, 388, 389, 390, 391, 402, 403, 404, 405, 413, 414,
+       415, 416, 427, 428, 429, 430, 443, 444, 455, 456, 457, 458 };
+static const unsigned emif0_pins[] = { 355, 356, 357, 362, 363, 364, 365, 366,
+       367, 368, 369, 370, 371, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+       385, 386, 387, 393, 394, 395, 396, 397, 398, 406, 407, 410, 411, 412,
+       417, 418 };
+static const unsigned emif1_pins[] = { 216, 217, 219, 220, 221, 222, 227, 228,
+       229, 230, 233, 234, 235, 236, 241, 242, 243, 244, 247, 248, 249, 250,
+       253, 254, 255, 260, 261, 262, 263, 266, 267, 268, 269, 272, 273, 274,
+       275, 280, 281, 282, 283, 286, 287, 288, 289, 292, 293, 294, 297, 298,
+       304, 305, 306, 307, 308, 313, 314, 315 };
+static const unsigned uart0_pins[] = { 134, 135, 136, 137 };
+static const unsigned mmc0_pins[] = { 166, 167, 168, 169, 170, 171, 176, 177 };
+static const unsigned spi0_pins[] = { 420, 421, 422, 423, 424, 425 };
+
+static const struct u300_pmx_mask emif0_mask[] = {
+       {0, 0},
+       {0, 0},
+       {0, 0},
+       {0, 0},
+       {0, 0},
+};
+
+static const struct u300_pmx_mask emif1_mask[] = {
+       /*
+        * This connects the SDRAM to CS2 and a NAND flash to
+        * CS0 on the EMIF.
+        */
+       {
+               U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK |
+               U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK |
+               U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK |
+               U300_SYSCON_PMC1LR_EMIF_1_MASK,
+               U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM |
+               U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC |
+               U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF |
+               U300_SYSCON_PMC1LR_EMIF_1_SDRAM0
+       },
+       {0, 0},
+       {0, 0},
+       {0, 0},
+       {0, 0},
+};
+
+static const struct u300_pmx_mask uart0_mask[] = {
+       {0, 0},
+       {
+               U300_SYSCON_PMC1HR_APP_UART0_1_MASK |
+               U300_SYSCON_PMC1HR_APP_UART0_2_MASK,
+               U300_SYSCON_PMC1HR_APP_UART0_1_UART0 |
+               U300_SYSCON_PMC1HR_APP_UART0_2_UART0
+       },
+       {0, 0},
+       {0, 0},
+       {0, 0},
+};
+
+static const struct u300_pmx_mask mmc0_mask[] = {
+       { U300_SYSCON_PMC1LR_MMCSD_MASK, U300_SYSCON_PMC1LR_MMCSD_MMCSD},
+       {0, 0},
+       {0, 0},
+       {0, 0},
+       { U300_SYSCON_PMC4R_APP_MISC_12_MASK,
+         U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO }
+};
+
+static const struct u300_pmx_mask spi0_mask[] = {
+       {0, 0},
+       {
+               U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
+               U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
+               U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
+               U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
+               U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
+               U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI
+       },
+       {0, 0},
+       {0, 0},
+       {0, 0}
+};
+
+static const struct u300_pin_group u300_pin_groups[] = {
+       {
+               .name = "powergrp",
+               .pins = power_pins,
+               .num_pins = ARRAY_SIZE(power_pins),
+       },
+       {
+               .name = "emif0grp",
+               .pins = emif0_pins,
+               .num_pins = ARRAY_SIZE(emif0_pins),
+       },
+       {
+               .name = "emif1grp",
+               .pins = emif1_pins,
+               .num_pins = ARRAY_SIZE(emif1_pins),
+       },
+       {
+               .name = "uart0grp",
+               .pins = uart0_pins,
+               .num_pins = ARRAY_SIZE(uart0_pins),
+       },
+       {
+               .name = "mmc0grp",
+               .pins = mmc0_pins,
+               .num_pins = ARRAY_SIZE(mmc0_pins),
+       },
+       {
+               .name = "spi0grp",
+               .pins = spi0_pins,
+               .num_pins = ARRAY_SIZE(spi0_pins),
+       },
+};
+
+static int u300_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+{
+       if (selector >= ARRAY_SIZE(u300_pin_groups))
+               return -EINVAL;
+       return 0;
+}
+
+static const char *u300_get_group_name(struct pinctrl_dev *pctldev,
+                                      unsigned selector)
+{
+       if (selector >= ARRAY_SIZE(u300_pin_groups))
+               return NULL;
+       return u300_pin_groups[selector].name;
+}
+
+static int u300_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+                              const unsigned **pins,
+                              unsigned *num_pins)
+{
+       if (selector >= ARRAY_SIZE(u300_pin_groups))
+               return -EINVAL;
+       *pins = u300_pin_groups[selector].pins;
+       *num_pins = u300_pin_groups[selector].num_pins;
+       return 0;
+}
+
+static void u300_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+                  unsigned offset)
+{
+       seq_printf(s, " " DRIVER_NAME);
+}
+
+static struct pinctrl_ops u300_pctrl_ops = {
+       .list_groups = u300_list_groups,
+       .get_group_name = u300_get_group_name,
+       .get_group_pins = u300_get_group_pins,
+       .pin_dbg_show = u300_pin_dbg_show,
+};
+
+/*
+ * Here we define the available functions and their corresponding pin groups
+ */
+
+/**
+ * struct u300_pmx_func - describes U300 pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @onmask: bits to set to enable this when doing pin muxing
+ */
+struct u300_pmx_func {
+       const char *name;
+       const char * const *groups;
+       const unsigned num_groups;
+       const struct u300_pmx_mask *mask;
+};
+
+static const char * const powergrps[] = { "powergrp" };
+static const char * const emif0grps[] = { "emif0grp" };
+static const char * const emif1grps[] = { "emif1grp" };
+static const char * const uart0grps[] = { "uart0grp" };
+static const char * const mmc0grps[] = { "mmc0grp" };
+static const char * const spi0grps[] = { "spi0grp" };
+
+static const struct u300_pmx_func u300_pmx_functions[] = {
+       {
+               .name = "power",
+               .groups = powergrps,
+               .num_groups = ARRAY_SIZE(powergrps),
+               /* Mask is N/A */
+       },
+       {
+               .name = "emif0",
+               .groups = emif0grps,
+               .num_groups = ARRAY_SIZE(emif0grps),
+               .mask = emif0_mask,
+       },
+       {
+               .name = "emif1",
+               .groups = emif1grps,
+               .num_groups = ARRAY_SIZE(emif1grps),
+               .mask = emif1_mask,
+       },
+       {
+               .name = "uart0",
+               .groups = uart0grps,
+               .num_groups = ARRAY_SIZE(uart0grps),
+               .mask = uart0_mask,
+       },
+       {
+               .name = "mmc0",
+               .groups = mmc0grps,
+               .num_groups = ARRAY_SIZE(mmc0grps),
+               .mask = mmc0_mask,
+       },
+       {
+               .name = "spi0",
+               .groups = spi0grps,
+               .num_groups = ARRAY_SIZE(spi0grps),
+               .mask = spi0_mask,
+       },
+};
+
+static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
+                              bool enable)
+{
+       u16 regval, val, mask;
+       int i;
+       const struct u300_pmx_mask *upmx_mask;
+
+       upmx_mask = u300_pmx_functions[selector].mask;
+       for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
+               if (enable)
+                       val = upmx_mask->bits;
+               else
+                       val = 0;
+
+               mask = upmx_mask->mask;
+               if (mask != 0) {
+                       regval = readw(upmx->virtbase + u300_pmx_registers[i]);
+                       regval &= ~mask;
+                       regval |= val;
+                       writew(regval, upmx->virtbase + u300_pmx_registers[i]);
+               }
+               upmx_mask++;
+       }
+}
+
+static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+                          unsigned group)
+{
+       struct u300_pmx *upmx;
+
+       /* There is nothing to do with the power pins */
+       if (selector == 0)
+               return 0;
+
+       upmx = pinctrl_dev_get_drvdata(pctldev);
+       u300_pmx_endisable(upmx, selector, true);
+
+       return 0;
+}
+
+static void u300_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
+                            unsigned group)
+{
+       struct u300_pmx *upmx;
+
+       /* There is nothing to do with the power pins */
+       if (selector == 0)
+               return;
+
+       upmx = pinctrl_dev_get_drvdata(pctldev);
+       u300_pmx_endisable(upmx, selector, false);
+}
+
+static int u300_pmx_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
+{
+       if (selector >= ARRAY_SIZE(u300_pmx_functions))
+               return -EINVAL;
+       return 0;
+}
+
+static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev,
+                                         unsigned selector)
+{
+       return u300_pmx_functions[selector].name;
+}
+
+static int u300_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+                              const char * const **groups,
+                              unsigned * const num_groups)
+{
+       *groups = u300_pmx_functions[selector].groups;
+       *num_groups = u300_pmx_functions[selector].num_groups;
+       return 0;
+}
+
+static struct pinmux_ops u300_pmx_ops = {
+       .list_functions = u300_pmx_list_funcs,
+       .get_function_name = u300_pmx_get_func_name,
+       .get_function_groups = u300_pmx_get_groups,
+       .enable = u300_pmx_enable,
+       .disable = u300_pmx_disable,
+};
+
+/*
+ * GPIO ranges handled by the application-side COH901XXX GPIO controller
+ * Very many pins can be converted into GPIO pins, but we only list those
+ * that are useful in practice to cut down on tables.
+ */
+#define U300_GPIO_RANGE(a, b, c) { .name = "COH901XXX", .id = a, .base= a, \
+                       .pin_base = b, .npins = c }
+
+static struct pinctrl_gpio_range u300_gpio_ranges[] = {
+       U300_GPIO_RANGE(10, 426, 1),
+       U300_GPIO_RANGE(11, 180, 1),
+       U300_GPIO_RANGE(12, 165, 1), /* MS/MMC card insertion */
+       U300_GPIO_RANGE(13, 179, 1),
+       U300_GPIO_RANGE(14, 178, 1),
+       U300_GPIO_RANGE(16, 194, 1),
+       U300_GPIO_RANGE(17, 193, 1),
+       U300_GPIO_RANGE(18, 192, 1),
+       U300_GPIO_RANGE(19, 191, 1),
+       U300_GPIO_RANGE(20, 186, 1),
+       U300_GPIO_RANGE(21, 185, 1),
+       U300_GPIO_RANGE(22, 184, 1),
+       U300_GPIO_RANGE(23, 183, 1),
+       U300_GPIO_RANGE(24, 182, 1),
+       U300_GPIO_RANGE(25, 181, 1),
+};
+
+static struct pinctrl_desc u300_pmx_desc = {
+       .name = DRIVER_NAME,
+       .pins = u300_pads,
+       .npins = ARRAY_SIZE(u300_pads),
+       .pctlops = &u300_pctrl_ops,
+       .pmxops = &u300_pmx_ops,
+       .owner = THIS_MODULE,
+};
+
+static int __init u300_pmx_probe(struct platform_device *pdev)
+{
+       struct u300_pmx *upmx;
+       struct resource *res;
+       int ret;
+       int i;
+
+       /* Create state holders etc for this driver */
+       upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
+       if (!upmx)
+               return -ENOMEM;
+
+       upmx->dev = &pdev->dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               ret = -ENOENT;
+               goto out_no_resource;
+       }
+       upmx->phybase = res->start;
+       upmx->physize = resource_size(res);
+
+       if (request_mem_region(upmx->phybase, upmx->physize,
+                              DRIVER_NAME) == NULL) {
+               ret = -ENOMEM;
+               goto out_no_memregion;
+       }
+
+       upmx->virtbase = ioremap(upmx->phybase, upmx->physize);
+       if (!upmx->virtbase) {
+               ret = -ENOMEM;
+               goto out_no_remap;
+       }
+
+       upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
+       if (!upmx->pctl) {
+               dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
+               ret = -EINVAL;
+               goto out_no_pmx;
+       }
+
+       /* We will handle a range of GPIO pins */
+       for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+               pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
+
+       platform_set_drvdata(pdev, upmx);
+
+       dev_info(&pdev->dev, "initialized U300 pinmux driver\n");
+
+       return 0;
+
+out_no_pmx:
+       iounmap(upmx->virtbase);
+out_no_remap:
+       platform_set_drvdata(pdev, NULL);
+out_no_memregion:
+       release_mem_region(upmx->phybase, upmx->physize);
+out_no_resource:
+       devm_kfree(&pdev->dev, upmx);
+       return ret;
+}
+
+static int __exit u300_pmx_remove(struct platform_device *pdev)
+{
+       struct u300_pmx *upmx = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+               pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
+       pinctrl_unregister(upmx->pctl);
+       iounmap(upmx->virtbase);
+       release_mem_region(upmx->phybase, upmx->physize);
+       platform_set_drvdata(pdev, NULL);
+       devm_kfree(&pdev->dev, upmx);
+
+       return 0;
+}
+
+static struct platform_driver u300_pmx_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+       },
+       .remove = __exit_p(u300_pmx_remove),
+};
+
+static int __init u300_pmx_init(void)
+{
+       return platform_driver_probe(&u300_pmx_driver, u300_pmx_probe);
+}
+arch_initcall(u300_pmx_init);
+
+static void __exit u300_pmx_exit(void)
+{
+       platform_driver_unregister(&u300_pmx_driver);
+}
+module_exit(u300_pmx_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("U300 pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinmux-sirf.c b/drivers/pinctrl/pinmux-sirf.c
deleted file mode 100644 (file)
index d76cae6..0000000
+++ /dev/null
@@ -1,1215 +0,0 @@
-/*
- * pinmux driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/bitops.h>
-
-#define DRIVER_NAME "pinmux-sirf"
-
-#define SIRFSOC_NUM_PADS    622
-#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
-#define SIRFSOC_RSC_PIN_MUX 0x4
-
-/*
- * pad list for the pinmux subsystem
- * refer to CS-131858-DC-6A.xls
- */
-static const struct pinctrl_pin_desc sirfsoc_pads[] = {
-       PINCTRL_PIN(4, "pwm0"),
-       PINCTRL_PIN(5, "pwm1"),
-       PINCTRL_PIN(6, "pwm2"),
-       PINCTRL_PIN(7, "pwm3"),
-       PINCTRL_PIN(8, "warm_rst_b"),
-       PINCTRL_PIN(9, "odo_0"),
-       PINCTRL_PIN(10, "odo_1"),
-       PINCTRL_PIN(11, "dr_dir"),
-       PINCTRL_PIN(13, "scl_1"),
-       PINCTRL_PIN(15, "sda_1"),
-       PINCTRL_PIN(16, "x_ldd[16]"),
-       PINCTRL_PIN(17, "x_ldd[17]"),
-       PINCTRL_PIN(18, "x_ldd[18]"),
-       PINCTRL_PIN(19, "x_ldd[19]"),
-       PINCTRL_PIN(20, "x_ldd[20]"),
-       PINCTRL_PIN(21, "x_ldd[21]"),
-       PINCTRL_PIN(22, "x_ldd[22]"),
-       PINCTRL_PIN(23, "x_ldd[23], lcdrom_frdy"),
-       PINCTRL_PIN(24, "gps_sgn"),
-       PINCTRL_PIN(25, "gps_mag"),
-       PINCTRL_PIN(26, "gps_clk"),
-       PINCTRL_PIN(27, "sd_cd_b_1"),
-       PINCTRL_PIN(28, "sd_vcc_on_1"),
-       PINCTRL_PIN(29, "sd_wp_b_1"),
-       PINCTRL_PIN(30, "sd_clk_3"),
-       PINCTRL_PIN(31, "sd_cmd_3"),
-
-       PINCTRL_PIN(32, "x_sd_dat_3[0]"),
-       PINCTRL_PIN(33, "x_sd_dat_3[1]"),
-       PINCTRL_PIN(34, "x_sd_dat_3[2]"),
-       PINCTRL_PIN(35, "x_sd_dat_3[3]"),
-       PINCTRL_PIN(36, "x_sd_clk_4"),
-       PINCTRL_PIN(37, "x_sd_cmd_4"),
-       PINCTRL_PIN(38, "x_sd_dat_4[0]"),
-       PINCTRL_PIN(39, "x_sd_dat_4[1]"),
-       PINCTRL_PIN(40, "x_sd_dat_4[2]"),
-       PINCTRL_PIN(41, "x_sd_dat_4[3]"),
-       PINCTRL_PIN(42, "x_cko_1"),
-       PINCTRL_PIN(43, "x_ac97_bit_clk"),
-       PINCTRL_PIN(44, "x_ac97_dout"),
-       PINCTRL_PIN(45, "x_ac97_din"),
-       PINCTRL_PIN(46, "x_ac97_sync"),
-       PINCTRL_PIN(47, "x_txd_1"),
-       PINCTRL_PIN(48, "x_txd_2"),
-       PINCTRL_PIN(49, "x_rxd_1"),
-       PINCTRL_PIN(50, "x_rxd_2"),
-       PINCTRL_PIN(51, "x_usclk_0"),
-       PINCTRL_PIN(52, "x_utxd_0"),
-       PINCTRL_PIN(53, "x_urxd_0"),
-       PINCTRL_PIN(54, "x_utfs_0"),
-       PINCTRL_PIN(55, "x_urfs_0"),
-       PINCTRL_PIN(56, "x_usclk_1"),
-       PINCTRL_PIN(57, "x_utxd_1"),
-       PINCTRL_PIN(58, "x_urxd_1"),
-       PINCTRL_PIN(59, "x_utfs_1"),
-       PINCTRL_PIN(60, "x_urfs_1"),
-       PINCTRL_PIN(61, "x_usclk_2"),
-       PINCTRL_PIN(62, "x_utxd_2"),
-       PINCTRL_PIN(63, "x_urxd_2"),
-
-       PINCTRL_PIN(64, "x_utfs_2"),
-       PINCTRL_PIN(65, "x_urfs_2"),
-       PINCTRL_PIN(66, "x_df_we_b"),
-       PINCTRL_PIN(67, "x_df_re_b"),
-       PINCTRL_PIN(68, "x_txd_0"),
-       PINCTRL_PIN(69, "x_rxd_0"),
-       PINCTRL_PIN(78, "x_cko_0"),
-       PINCTRL_PIN(79, "x_vip_pxd[7]"),
-       PINCTRL_PIN(80, "x_vip_pxd[6]"),
-       PINCTRL_PIN(81, "x_vip_pxd[5]"),
-       PINCTRL_PIN(82, "x_vip_pxd[4]"),
-       PINCTRL_PIN(83, "x_vip_pxd[3]"),
-       PINCTRL_PIN(84, "x_vip_pxd[2]"),
-       PINCTRL_PIN(85, "x_vip_pxd[1]"),
-       PINCTRL_PIN(86, "x_vip_pxd[0]"),
-       PINCTRL_PIN(87, "x_vip_vsync"),
-       PINCTRL_PIN(88, "x_vip_hsync"),
-       PINCTRL_PIN(89, "x_vip_pxclk"),
-       PINCTRL_PIN(90, "x_sda_0"),
-       PINCTRL_PIN(91, "x_scl_0"),
-       PINCTRL_PIN(92, "x_df_ry_by"),
-       PINCTRL_PIN(93, "x_df_cs_b[1]"),
-       PINCTRL_PIN(94, "x_df_cs_b[0]"),
-       PINCTRL_PIN(95, "x_l_pclk"),
-
-       PINCTRL_PIN(96, "x_l_lck"),
-       PINCTRL_PIN(97, "x_l_fck"),
-       PINCTRL_PIN(98, "x_l_de"),
-       PINCTRL_PIN(99, "x_ldd[0]"),
-       PINCTRL_PIN(100, "x_ldd[1]"),
-       PINCTRL_PIN(101, "x_ldd[2]"),
-       PINCTRL_PIN(102, "x_ldd[3]"),
-       PINCTRL_PIN(103, "x_ldd[4]"),
-       PINCTRL_PIN(104, "x_ldd[5]"),
-       PINCTRL_PIN(105, "x_ldd[6]"),
-       PINCTRL_PIN(106, "x_ldd[7]"),
-       PINCTRL_PIN(107, "x_ldd[8]"),
-       PINCTRL_PIN(108, "x_ldd[9]"),
-       PINCTRL_PIN(109, "x_ldd[10]"),
-       PINCTRL_PIN(110, "x_ldd[11]"),
-       PINCTRL_PIN(111, "x_ldd[12]"),
-       PINCTRL_PIN(112, "x_ldd[13]"),
-       PINCTRL_PIN(113, "x_ldd[14]"),
-       PINCTRL_PIN(114, "x_ldd[15]"),
-};
-
-/**
- * @dev: a pointer back to containing device
- * @virtbase: the offset to the controller in virtual memory
- */
-struct sirfsoc_pmx {
-       struct device *dev;
-       struct pinctrl_dev *pmx;
-       void __iomem *gpio_virtbase;
-       void __iomem *rsc_virtbase;
-};
-
-/* SIRFSOC_GPIO_PAD_EN set */
-struct sirfsoc_muxmask {
-       unsigned long group;
-       unsigned long mask;
-};
-
-struct sirfsoc_padmux {
-       unsigned long muxmask_counts;
-       const struct sirfsoc_muxmask *muxmask;
-       /* RSC_PIN_MUX set */
-       unsigned long funcmask;
-       unsigned long funcval;
-};
-
- /**
- * struct sirfsoc_pin_group - describes a SiRFprimaII pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- *     from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- *     elements in .pins so we can iterate over that array
- */
-struct sirfsoc_pin_group {
-       const char *name;
-       const unsigned int *pins;
-       const unsigned num_pins;
-};
-
-static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = {
-       {
-               .group = 3,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
-                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
-                       BIT(17) | BIT(18),
-       }, {
-               .group = 2,
-               .mask = BIT(31),
-       },
-};
-
-static const struct sirfsoc_padmux lcd_16bits_padmux = {
-       .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask),
-       .muxmask = lcd_16bits_sirfsoc_muxmask,
-       .funcmask = BIT(4),
-       .funcval = 0,
-};
-
-static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
-       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = {
-       {
-               .group = 3,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
-                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
-                       BIT(17) | BIT(18),
-       }, {
-               .group = 2,
-               .mask = BIT(31),
-       }, {
-               .group = 0,
-               .mask = BIT(16) | BIT(17),
-       },
-};
-
-static const struct sirfsoc_padmux lcd_18bits_padmux = {
-       .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask),
-       .muxmask = lcd_18bits_muxmask,
-       .funcmask = BIT(4),
-       .funcval = 0,
-};
-
-static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
-       105, 106, 107, 108, 109, 110, 111, 112, 113, 114};
-
-static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = {
-       {
-               .group = 3,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
-                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
-                       BIT(17) | BIT(18),
-       }, {
-               .group = 2,
-               .mask = BIT(31),
-       }, {
-               .group = 0,
-               .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
-       },
-};
-
-static const struct sirfsoc_padmux lcd_24bits_padmux = {
-       .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask),
-       .muxmask = lcd_24bits_muxmask,
-       .funcmask = BIT(4),
-       .funcval = 0,
-};
-
-static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
-       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask lcdrom_muxmask[] = {
-       {
-               .group = 3,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) |
-                       BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
-                       BIT(17) | BIT(18),
-       }, {
-               .group = 2,
-               .mask = BIT(31),
-       }, {
-               .group = 0,
-               .mask = BIT(23),
-       },
-};
-
-static const struct sirfsoc_padmux lcdrom_padmux = {
-       .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask),
-       .muxmask = lcdrom_muxmask,
-       .funcmask = BIT(4),
-       .funcval = BIT(4),
-};
-
-static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
-       105, 106, 107, 108, 109, 110, 111, 112, 113, 114 };
-
-static const struct sirfsoc_muxmask uart0_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(4) | BIT(5),
-       }, {
-               .group = 1,
-               .mask = BIT(23) | BIT(28),
-       },
-};
-
-static const struct sirfsoc_padmux uart0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(uart0_muxmask),
-       .muxmask = uart0_muxmask,
-       .funcmask = BIT(9),
-       .funcval = BIT(9),
-};
-
-static const unsigned uart0_pins[] = { 55, 60, 68, 69 };
-
-static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(4) | BIT(5),
-       },
-};
-
-static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = {
-       .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask),
-       .muxmask = uart0_nostreamctrl_muxmask,
-};
-
-static const unsigned uart0_nostreamctrl_pins[] = { 68, 39 };
-
-static const struct sirfsoc_muxmask uart1_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(15) | BIT(17),
-       },
-};
-
-static const struct sirfsoc_padmux uart1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(uart1_muxmask),
-       .muxmask = uart1_muxmask,
-};
-
-static const unsigned uart1_pins[] = { 47, 49 };
-
-static const struct sirfsoc_muxmask uart2_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(16) | BIT(18) | BIT(24) | BIT(27),
-       },
-};
-
-static const struct sirfsoc_padmux uart2_padmux = {
-       .muxmask_counts = ARRAY_SIZE(uart2_muxmask),
-       .muxmask = uart2_muxmask,
-       .funcmask = BIT(10),
-       .funcval = BIT(10),
-};
-
-static const unsigned uart2_pins[] = { 48, 50, 56, 59 };
-
-static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(16) | BIT(18),
-       },
-};
-
-static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = {
-       .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask),
-       .muxmask = uart2_nostreamctrl_muxmask,
-};
-
-static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 };
-
-static const struct sirfsoc_muxmask sdmmc3_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(30) | BIT(31),
-       }, {
-               .group = 1,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
-       },
-};
-
-static const struct sirfsoc_padmux sdmmc3_padmux = {
-       .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask),
-       .muxmask = sdmmc3_muxmask,
-       .funcmask = BIT(7),
-       .funcval = 0,
-};
-
-static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 };
-
-static const struct sirfsoc_muxmask spi0_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
-       },
-};
-
-static const struct sirfsoc_padmux spi0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(spi0_muxmask),
-       .muxmask = spi0_muxmask,
-       .funcmask = BIT(7),
-       .funcval = BIT(7),
-};
-
-static const unsigned spi0_pins[] = { 32, 33, 34, 35 };
-
-static const struct sirfsoc_muxmask sdmmc4_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9),
-       },
-};
-
-static const struct sirfsoc_padmux sdmmc4_padmux = {
-       .muxmask_counts = ARRAY_SIZE(sdmmc4_muxmask),
-       .muxmask = sdmmc4_muxmask,
-};
-
-static const unsigned sdmmc4_pins[] = { 36, 37, 38, 39, 40, 41 };
-
-static const struct sirfsoc_muxmask cko1_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(10),
-       },
-};
-
-static const struct sirfsoc_padmux cko1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(cko1_muxmask),
-       .muxmask = cko1_muxmask,
-       .funcmask = BIT(3),
-       .funcval = 0,
-};
-
-static const unsigned cko1_pins[] = { 42 };
-
-static const struct sirfsoc_muxmask i2s_muxmask[] = {
-       {
-               .group = 1,
-               .mask =
-                       BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19)
-                               | BIT(23) | BIT(28),
-       },
-};
-
-static const struct sirfsoc_padmux i2s_padmux = {
-       .muxmask_counts = ARRAY_SIZE(i2s_muxmask),
-       .muxmask = i2s_muxmask,
-       .funcmask = BIT(3) | BIT(9),
-       .funcval = BIT(3),
-};
-
-static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 };
-
-static const struct sirfsoc_muxmask ac97_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
-       },
-};
-
-static const struct sirfsoc_padmux ac97_padmux = {
-       .muxmask_counts = ARRAY_SIZE(ac97_muxmask),
-       .muxmask = ac97_muxmask,
-       .funcmask = BIT(8),
-       .funcval = 0,
-};
-
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
-
-static const struct sirfsoc_muxmask spi1_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
-       },
-};
-
-static const struct sirfsoc_padmux spi1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(spi1_muxmask),
-       .muxmask = spi1_muxmask,
-       .funcmask = BIT(8),
-       .funcval = BIT(8),
-};
-
-static const unsigned spi1_pins[] = { 33, 34, 35, 36 };
-
-static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(27) | BIT(28) | BIT(29),
-       },
-};
-
-static const struct sirfsoc_padmux sdmmc1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask),
-       .muxmask = sdmmc1_muxmask,
-};
-
-static const unsigned sdmmc1_pins[] = { 27, 28, 29 };
-
-static const struct sirfsoc_muxmask gps_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(24) | BIT(25) | BIT(26),
-       },
-};
-
-static const struct sirfsoc_padmux gps_padmux = {
-       .muxmask_counts = ARRAY_SIZE(gps_muxmask),
-       .muxmask = gps_muxmask,
-       .funcmask = BIT(12) | BIT(13) | BIT(14),
-       .funcval = BIT(12),
-};
-
-static const unsigned gps_pins[] = { 24, 25, 26 };
-
-static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(24) | BIT(25) | BIT(26),
-       }, {
-               .group = 1,
-               .mask = BIT(29),
-       }, {
-               .group = 2,
-               .mask = BIT(0) | BIT(1),
-       },
-};
-
-static const struct sirfsoc_padmux sdmmc5_padmux = {
-       .muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask),
-       .muxmask = sdmmc5_muxmask,
-       .funcmask = BIT(13) | BIT(14),
-       .funcval = BIT(13) | BIT(14),
-};
-
-static const unsigned sdmmc5_pins[] = { 24, 25, 26, 61, 64, 65 };
-
-static const struct sirfsoc_muxmask usp0_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
-       },
-};
-
-static const struct sirfsoc_padmux usp0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(usp0_muxmask),
-       .muxmask = usp0_muxmask,
-       .funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9),
-       .funcval = 0,
-};
-
-static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
-
-static const struct sirfsoc_muxmask usp1_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28),
-       },
-};
-
-static const struct sirfsoc_padmux usp1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(usp1_muxmask),
-       .muxmask = usp1_muxmask,
-       .funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11),
-       .funcval = 0,
-};
-
-static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 };
-
-static const struct sirfsoc_muxmask usp2_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(29) | BIT(30) | BIT(31),
-       }, {
-               .group = 2,
-               .mask = BIT(0) | BIT(1),
-       },
-};
-
-static const struct sirfsoc_padmux usp2_padmux = {
-       .muxmask_counts = ARRAY_SIZE(usp2_muxmask),
-       .muxmask = usp2_muxmask,
-       .funcmask = BIT(13) | BIT(14),
-       .funcval = 0,
-};
-
-static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 };
-
-static const struct sirfsoc_muxmask nand_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30),
-       },
-};
-
-static const struct sirfsoc_padmux nand_padmux = {
-       .muxmask_counts = ARRAY_SIZE(nand_muxmask),
-       .muxmask = nand_muxmask,
-       .funcmask = BIT(5),
-       .funcval = 0,
-};
-
-static const unsigned nand_pins[] = { 64, 65, 92, 93, 94 };
-
-static const struct sirfsoc_padmux sdmmc0_padmux = {
-       .muxmask_counts = 0,
-       .funcmask = BIT(5),
-       .funcval = 0,
-};
-
-static const unsigned sdmmc0_pins[] = { };
-
-static const struct sirfsoc_muxmask sdmmc2_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(2) | BIT(3),
-       },
-};
-
-static const struct sirfsoc_padmux sdmmc2_padmux = {
-       .muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask),
-       .muxmask = sdmmc2_muxmask,
-       .funcmask = BIT(5),
-       .funcval = BIT(5),
-};
-
-static const unsigned sdmmc2_pins[] = { 66, 67 };
-
-static const struct sirfsoc_muxmask cko0_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(14),
-       },
-};
-
-static const struct sirfsoc_padmux cko0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(cko0_muxmask),
-       .muxmask = cko0_muxmask,
-};
-
-static const unsigned cko0_pins[] = { 78 };
-
-static const struct sirfsoc_muxmask vip_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
-                       | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
-                       BIT(25),
-       },
-};
-
-static const struct sirfsoc_padmux vip_padmux = {
-       .muxmask_counts = ARRAY_SIZE(vip_muxmask),
-       .muxmask = vip_muxmask,
-       .funcmask = BIT(0),
-       .funcval = 0,
-};
-
-static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
-
-static const struct sirfsoc_muxmask i2c0_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(26) | BIT(27),
-       },
-};
-
-static const struct sirfsoc_padmux i2c0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(i2c0_muxmask),
-       .muxmask = i2c0_muxmask,
-};
-
-static const unsigned i2c0_pins[] = { 90, 91 };
-
-static const struct sirfsoc_muxmask i2c1_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(13) | BIT(15),
-       },
-};
-
-static const struct sirfsoc_padmux i2c1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(i2c1_muxmask),
-       .muxmask = i2c1_muxmask,
-};
-
-static const unsigned i2c1_pins[] = { 13, 15 };
-
-static const struct sirfsoc_muxmask viprom_muxmask[] = {
-       {
-               .group = 2,
-               .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19)
-                       | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) |
-                       BIT(25),
-       }, {
-               .group = 0,
-               .mask = BIT(12),
-       },
-};
-
-static const struct sirfsoc_padmux viprom_padmux = {
-       .muxmask_counts = ARRAY_SIZE(viprom_muxmask),
-       .muxmask = viprom_muxmask,
-       .funcmask = BIT(0),
-       .funcval = BIT(0),
-};
-
-static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 };
-
-static const struct sirfsoc_muxmask pwm0_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(4),
-       },
-};
-
-static const struct sirfsoc_padmux pwm0_padmux = {
-       .muxmask_counts = ARRAY_SIZE(pwm0_muxmask),
-       .muxmask = pwm0_muxmask,
-       .funcmask = BIT(12),
-       .funcval = 0,
-};
-
-static const unsigned pwm0_pins[] = { 4 };
-
-static const struct sirfsoc_muxmask pwm1_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(5),
-       },
-};
-
-static const struct sirfsoc_padmux pwm1_padmux = {
-       .muxmask_counts = ARRAY_SIZE(pwm1_muxmask),
-       .muxmask = pwm1_muxmask,
-};
-
-static const unsigned pwm1_pins[] = { 5 };
-
-static const struct sirfsoc_muxmask pwm2_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(6),
-       },
-};
-
-static const struct sirfsoc_padmux pwm2_padmux = {
-       .muxmask_counts = ARRAY_SIZE(pwm2_muxmask),
-       .muxmask = pwm2_muxmask,
-};
-
-static const unsigned pwm2_pins[] = { 6 };
-
-static const struct sirfsoc_muxmask pwm3_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(7),
-       },
-};
-
-static const struct sirfsoc_padmux pwm3_padmux = {
-       .muxmask_counts = ARRAY_SIZE(pwm3_muxmask),
-       .muxmask = pwm3_muxmask,
-};
-
-static const unsigned pwm3_pins[] = { 7 };
-
-static const struct sirfsoc_muxmask warm_rst_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(8),
-       },
-};
-
-static const struct sirfsoc_padmux warm_rst_padmux = {
-       .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask),
-       .muxmask = warm_rst_muxmask,
-};
-
-static const unsigned warm_rst_pins[] = { 8 };
-
-static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(22),
-       },
-};
-static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = {
-       .muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask),
-       .muxmask = usb0_utmi_drvbus_muxmask,
-       .funcmask = BIT(6),
-       .funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */
-};
-
-static const unsigned usb0_utmi_drvbus_pins[] = { 54 };
-
-static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = {
-       {
-               .group = 1,
-               .mask = BIT(27),
-       },
-};
-
-static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = {
-       .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask),
-       .muxmask = usb1_utmi_drvbus_muxmask,
-       .funcmask = BIT(11),
-       .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */
-};
-
-static const unsigned usb1_utmi_drvbus_pins[] = { 59 };
-
-static const struct sirfsoc_muxmask pulse_count_muxmask[] = {
-       {
-               .group = 0,
-               .mask = BIT(9) | BIT(10) | BIT(11),
-       },
-};
-
-static const struct sirfsoc_padmux pulse_count_padmux = {
-       .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask),
-       .muxmask = pulse_count_muxmask,
-};
-
-static const unsigned pulse_count_pins[] = { 9, 10, 11 };
-
-#define SIRFSOC_PIN_GROUP(n, p)  \
-       {                       \
-               .name = n,      \
-               .pins = p,      \
-               .num_pins = ARRAY_SIZE(p),      \
-       }
-
-static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
-       SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins),
-       SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins),
-       SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
-       SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
-       SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
-       SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
-       SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
-       SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
-       SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
-       SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
-       SIRFSOC_PIN_GROUP("usp2grp", usp2_pins),
-       SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
-       SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
-       SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
-       SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins),
-       SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins),
-       SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins),
-       SIRFSOC_PIN_GROUP("vipgrp", vip_pins),
-       SIRFSOC_PIN_GROUP("vipromgrp", viprom_pins),
-       SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins),
-       SIRFSOC_PIN_GROUP("cko0_rstgrp", cko0_pins),
-       SIRFSOC_PIN_GROUP("cko1_rstgrp", cko1_pins),
-       SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins),
-       SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins),
-       SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins),
-       SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins),
-       SIRFSOC_PIN_GROUP("sdmmc4grp", sdmmc4_pins),
-       SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins),
-       SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins),
-       SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins),
-       SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins),
-       SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins),
-       SIRFSOC_PIN_GROUP("ac97grp", ac97_pins),
-       SIRFSOC_PIN_GROUP("nandgrp", nand_pins),
-       SIRFSOC_PIN_GROUP("spi0grp", spi0_pins),
-       SIRFSOC_PIN_GROUP("spi1grp", spi1_pins),
-       SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
-};
-
-static int sirfsoc_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-               return -EINVAL;
-       return 0;
-}
-
-static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
-                                      unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-               return NULL;
-       return sirfsoc_pin_groups[selector].name;
-}
-
-static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
-                              const unsigned **pins,
-                              unsigned *num_pins)
-{
-       if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-               return -EINVAL;
-       *pins = sirfsoc_pin_groups[selector].pins;
-       *num_pins = sirfsoc_pin_groups[selector].num_pins;
-       return 0;
-}
-
-static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
-                  unsigned offset)
-{
-       seq_printf(s, " " DRIVER_NAME);
-}
-
-static struct pinctrl_ops sirfsoc_pctrl_ops = {
-       .list_groups = sirfsoc_list_groups,
-       .get_group_name = sirfsoc_get_group_name,
-       .get_group_pins = sirfsoc_get_group_pins,
-       .pin_dbg_show = sirfsoc_pin_dbg_show,
-};
-
-struct sirfsoc_pmx_func {
-       const char *name;
-       const char * const *groups;
-       const unsigned num_groups;
-       const struct sirfsoc_padmux *padmux;
-};
-
-static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" };
-static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
-static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
-static const char * const lcdromgrp[] = { "lcdromgrp" };
-static const char * const uart0grp[] = { "uart0grp" };
-static const char * const uart1grp[] = { "uart1grp" };
-static const char * const uart2grp[] = { "uart2grp" };
-static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
-static const char * const usp0grp[] = { "usp0grp" };
-static const char * const usp1grp[] = { "usp1grp" };
-static const char * const usp2grp[] = { "usp2grp" };
-static const char * const i2c0grp[] = { "i2c0grp" };
-static const char * const i2c1grp[] = { "i2c1grp" };
-static const char * const pwm0grp[] = { "pwm0grp" };
-static const char * const pwm1grp[] = { "pwm1grp" };
-static const char * const pwm2grp[] = { "pwm2grp" };
-static const char * const pwm3grp[] = { "pwm3grp" };
-static const char * const vipgrp[] = { "vipgrp" };
-static const char * const vipromgrp[] = { "vipromgrp" };
-static const char * const warm_rstgrp[] = { "warm_rstgrp" };
-static const char * const cko0grp[] = { "cko0grp" };
-static const char * const cko1grp[] = { "cko1grp" };
-static const char * const sdmmc0grp[] = { "sdmmc0grp" };
-static const char * const sdmmc1grp[] = { "sdmmc1grp" };
-static const char * const sdmmc2grp[] = { "sdmmc2grp" };
-static const char * const sdmmc3grp[] = { "sdmmc3grp" };
-static const char * const sdmmc4grp[] = { "sdmmc4grp" };
-static const char * const sdmmc5grp[] = { "sdmmc5grp" };
-static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" };
-static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" };
-static const char * const pulse_countgrp[] = { "pulse_countgrp" };
-static const char * const i2sgrp[] = { "i2sgrp" };
-static const char * const ac97grp[] = { "ac97grp" };
-static const char * const nandgrp[] = { "nandgrp" };
-static const char * const spi0grp[] = { "spi0grp" };
-static const char * const spi1grp[] = { "spi1grp" };
-static const char * const gpsgrp[] = { "gpsgrp" };
-
-#define SIRFSOC_PMX_FUNCTION(n, g, m)          \
-       {                                       \
-               .name = n,                      \
-               .groups = g,                    \
-               .num_groups = ARRAY_SIZE(g),    \
-               .padmux = &m,                   \
-       }
-
-static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
-       SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux),
-       SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux),
-       SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
-       SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
-       SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
-       SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
-       SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
-       SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
-       SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
-       SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
-       SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux),
-       SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
-       SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
-       SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
-       SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux),
-       SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux),
-       SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux),
-       SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux),
-       SIRFSOC_PMX_FUNCTION("viprom", vipromgrp, viprom_padmux),
-       SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux),
-       SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux),
-       SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux),
-       SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux),
-       SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux),
-       SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux),
-       SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux),
-       SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux),
-       SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux),
-       SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux),
-       SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux),
-       SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux),
-       SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux),
-};
-
-static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector,
-       bool enable)
-{
-       int i;
-       const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux;
-       const struct sirfsoc_muxmask *mask = mux->muxmask;
-
-       for (i = 0; i < mux->muxmask_counts; i++) {
-               u32 muxval;
-               muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
-               if (enable)
-                       muxval = muxval & ~mask[i].mask;
-               else
-                       muxval = muxval | mask[i].mask;
-               writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group));
-       }
-
-       if (mux->funcmask && enable) {
-               u32 func_en_val;
-               func_en_val =
-                       readl(spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
-               func_en_val =
-                       (func_en_val & ~mux->funcmask) | (mux->
-                               funcval);
-               writel(func_en_val, spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX);
-       }
-}
-
-static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector,
-       unsigned group)
-{
-       struct sirfsoc_pmx *spmx;
-
-       spmx = pinctrl_dev_get_drvdata(pmxdev);
-       sirfsoc_pinmux_endisable(spmx, selector, true);
-
-       return 0;
-}
-
-static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector,
-       unsigned group)
-{
-       struct sirfsoc_pmx *spmx;
-
-       spmx = pinctrl_dev_get_drvdata(pmxdev);
-       sirfsoc_pinmux_endisable(spmx, selector, false);
-}
-
-static int sirfsoc_pinmux_list_funcs(struct pinctrl_dev *pmxdev, unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(sirfsoc_pmx_functions))
-               return -EINVAL;
-       return 0;
-}
-
-static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
-                                         unsigned selector)
-{
-       return sirfsoc_pmx_functions[selector].name;
-}
-
-static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
-                              const char * const **groups,
-                              unsigned * const num_groups)
-{
-       *groups = sirfsoc_pmx_functions[selector].groups;
-       *num_groups = sirfsoc_pmx_functions[selector].num_groups;
-       return 0;
-}
-
-static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
-       struct pinctrl_gpio_range *range, unsigned offset)
-{
-       struct sirfsoc_pmx *spmx;
-
-       int group = range->id;
-
-       u32 muxval;
-
-       spmx = pinctrl_dev_get_drvdata(pmxdev);
-
-       muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
-       muxval = muxval | (1 << offset);
-       writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
-
-       return 0;
-}
-
-static struct pinmux_ops sirfsoc_pinmux_ops = {
-       .list_functions = sirfsoc_pinmux_list_funcs,
-       .enable = sirfsoc_pinmux_enable,
-       .disable = sirfsoc_pinmux_disable,
-       .get_function_name = sirfsoc_pinmux_get_func_name,
-       .get_function_groups = sirfsoc_pinmux_get_groups,
-       .gpio_request_enable = sirfsoc_pinmux_request_gpio,
-};
-
-static struct pinctrl_desc sirfsoc_pinmux_desc = {
-       .name = DRIVER_NAME,
-       .pins = sirfsoc_pads,
-       .npins = ARRAY_SIZE(sirfsoc_pads),
-       .maxpin = SIRFSOC_NUM_PADS - 1,
-       .pctlops = &sirfsoc_pctrl_ops,
-       .pmxops = &sirfsoc_pinmux_ops,
-       .owner = THIS_MODULE,
-};
-
-/*
- * Todo: bind irq_chip to every pinctrl_gpio_range
- */
-static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
-       {
-               .name = "sirfsoc-gpio*",
-               .id = 0,
-               .base = 0,
-               .npins = 32,
-       }, {
-               .name = "sirfsoc-gpio*",
-               .id = 1,
-               .base = 32,
-               .npins = 32,
-       }, {
-               .name = "sirfsoc-gpio*",
-               .id = 2,
-               .base = 64,
-               .npins = 32,
-       }, {
-               .name = "sirfsoc-gpio*",
-               .id = 3,
-               .base = 96,
-               .npins = 19,
-       },
-};
-
-static void __iomem *sirfsoc_rsc_of_iomap(void)
-{
-       const struct of_device_id rsc_ids[]  = {
-               { .compatible = "sirf,prima2-rsc" },
-               {}
-       };
-       struct device_node *np;
-
-       np = of_find_matching_node(NULL, rsc_ids);
-       if (!np)
-               panic("unable to find compatible rsc node in dtb\n");
-
-       return of_iomap(np, 0);
-}
-
-static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
-{
-       int ret;
-       struct sirfsoc_pmx *spmx;
-       struct device_node *np = pdev->dev.of_node;
-       int i;
-
-       /* Create state holders etc for this driver */
-       spmx = devm_kzalloc(&pdev->dev, sizeof(*spmx), GFP_KERNEL);
-       if (!spmx)
-               return -ENOMEM;
-
-       spmx->dev = &pdev->dev;
-
-       platform_set_drvdata(pdev, spmx);
-
-       spmx->gpio_virtbase = of_iomap(np, 0);
-       if (!spmx->gpio_virtbase) {
-               ret = -ENOMEM;
-               dev_err(&pdev->dev, "can't map gpio registers\n");
-               goto out_no_gpio_remap;
-       }
-
-       spmx->rsc_virtbase = sirfsoc_rsc_of_iomap();
-       if (!spmx->rsc_virtbase) {
-               ret = -ENOMEM;
-               dev_err(&pdev->dev, "can't map rsc registers\n");
-               goto out_no_rsc_remap;
-       }
-
-       /* Now register the pin controller and all pins it handles */
-       spmx->pmx = pinctrl_register(&sirfsoc_pinmux_desc, &pdev->dev, spmx);
-       if (!spmx->pmx) {
-               dev_err(&pdev->dev, "could not register SIRFSOC pinmux driver\n");
-               ret = -EINVAL;
-               goto out_no_pmx;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++)
-               pinctrl_add_gpio_range(spmx->pmx, &sirfsoc_gpio_ranges[i]);
-
-       dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n");
-
-       return 0;
-
-out_no_pmx:
-       iounmap(spmx->rsc_virtbase);
-out_no_rsc_remap:
-       iounmap(spmx->gpio_virtbase);
-out_no_gpio_remap:
-       platform_set_drvdata(pdev, NULL);
-       devm_kfree(&pdev->dev, spmx);
-       return ret;
-}
-
-static const struct of_device_id pinmux_ids[]  = {
-       { .compatible = "sirf,prima2-gpio-pinmux" },
-       {}
-};
-
-static struct platform_driver sirfsoc_pinmux_driver = {
-       .driver = {
-               .name = DRIVER_NAME,
-               .owner = THIS_MODULE,
-               .of_match_table = pinmux_ids,
-       },
-       .probe = sirfsoc_pinmux_probe,
-};
-
-static int __init sirfsoc_pinmux_init(void)
-{
-       return platform_driver_register(&sirfsoc_pinmux_driver);
-}
-arch_initcall(sirfsoc_pinmux_init);
-
-MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
-       "Barry Song <baohua.song@csr.com>");
-MODULE_DESCRIPTION("SIRFSOC pin control driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinmux-u300.c b/drivers/pinctrl/pinmux-u300.c
deleted file mode 100644 (file)
index 4858a64..0000000
+++ /dev/null
@@ -1,1135 +0,0 @@
-/*
- * Driver for the U300 pin controller
- *
- * Based on the original U300 padmux functions
- * Copyright (C) 2009-2011 ST-Ericsson AB
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * The DB3350 design and control registers are oriented around pads rather than
- * pins, so we enumerate the pads we can mux rather than actual pins. The pads
- * are connected to different pins in different packaging types, so it would
- * be confusing.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-
-/*
- * Register definitions for the U300 Padmux control registers in the
- * system controller
- */
-
-/* PAD MUX Control register 1 (LOW) 16bit (R/W) */
-#define U300_SYSCON_PMC1LR                                     0x007C
-#define U300_SYSCON_PMC1LR_MASK                                        0xFFFF
-#define U300_SYSCON_PMC1LR_CDI_MASK                            0xC000
-#define U300_SYSCON_PMC1LR_CDI_CDI                             0x0000
-#define U300_SYSCON_PMC1LR_CDI_EMIF                            0x4000
-/* For BS335 */
-#define U300_SYSCON_PMC1LR_CDI_CDI2                            0x8000
-#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO                  0xC000
-/* For BS365 */
-#define U300_SYSCON_PMC1LR_CDI_GPIO                            0x8000
-#define U300_SYSCON_PMC1LR_CDI_WCDMA                           0xC000
-/* Common defs */
-#define U300_SYSCON_PMC1LR_PDI_MASK                            0x3000
-#define U300_SYSCON_PMC1LR_PDI_PDI                             0x0000
-#define U300_SYSCON_PMC1LR_PDI_EGG                             0x1000
-#define U300_SYSCON_PMC1LR_PDI_WCDMA                           0x3000
-#define U300_SYSCON_PMC1LR_MMCSD_MASK                          0x0C00
-#define U300_SYSCON_PMC1LR_MMCSD_MMCSD                         0x0000
-#define U300_SYSCON_PMC1LR_MMCSD_MSPRO                         0x0400
-#define U300_SYSCON_PMC1LR_MMCSD_DSP                           0x0800
-#define U300_SYSCON_PMC1LR_MMCSD_WCDMA                         0x0C00
-#define U300_SYSCON_PMC1LR_ETM_MASK                            0x0300
-#define U300_SYSCON_PMC1LR_ETM_ACC                             0x0000
-#define U300_SYSCON_PMC1LR_ETM_APP                             0x0100
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK                     0x00C0
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC                   0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF                     0x0040
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM                    0x0080
-#define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB               0x00C0
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK                     0x0030
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC                   0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF                     0x0010
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM                    0x0020
-#define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI                     0x0030
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK                     0x000C
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC                   0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF                     0x0004
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM                    0x0008
-#define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI                     0x000C
-#define U300_SYSCON_PMC1LR_EMIF_1_MASK                         0x0003
-#define U300_SYSCON_PMC1LR_EMIF_1_STATIC                       0x0000
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0                       0x0001
-#define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1                       0x0002
-#define U300_SYSCON_PMC1LR_EMIF_1                              0x0003
-/* PAD MUX Control register 2 (HIGH) 16bit (R/W) */
-#define U300_SYSCON_PMC1HR                                     0x007E
-#define U300_SYSCON_PMC1HR_MASK                                        0xFFFF
-#define U300_SYSCON_PMC1HR_MISC_2_MASK                         0xC000
-#define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO                     0x0000
-#define U300_SYSCON_PMC1HR_MISC_2_MSPRO                                0x4000
-#define U300_SYSCON_PMC1HR_MISC_2_DSP                          0x8000
-#define U300_SYSCON_PMC1HR_MISC_2_AAIF                         0xC000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK                     0x3000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO                 0x0000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF                     0x1000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP                      0x2000
-#define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF                     0x3000
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK                     0x0C00
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO                 0x0000
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC                      0x0400
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP                      0x0800
-#define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF                     0x0C00
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK                   0x0300
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO               0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI                    0x0100
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF                   0x0300
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK                   0x00C0
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO               0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI                    0x0040
-#define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF                   0x00C0
-#define U300_SYSCON_PMC1HR_APP_SPI_2_MASK                      0x0030
-#define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC1HR_APP_SPI_2_SPI                       0x0010
-#define U300_SYSCON_PMC1HR_APP_SPI_2_DSP                       0x0020
-#define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF                      0x0030
-#define U300_SYSCON_PMC1HR_APP_UART0_2_MASK                    0x000C
-#define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO                        0x0000
-#define U300_SYSCON_PMC1HR_APP_UART0_2_UART0                   0x0004
-#define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS                 0x0008
-#define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF                    0x000C
-#define U300_SYSCON_PMC1HR_APP_UART0_1_MASK                    0x0003
-#define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO                        0x0000
-#define U300_SYSCON_PMC1HR_APP_UART0_1_UART0                   0x0001
-#define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF                    0x0003
-/* Padmux 2 control */
-#define U300_SYSCON_PMC2R                                      0x100
-#define U300_SYSCON_PMC2R_APP_MISC_0_MASK                      0x00C0
-#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM                        0x0040
-#define U300_SYSCON_PMC2R_APP_MISC_0_MMC                       0x0080
-#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2                      0x00C0
-#define U300_SYSCON_PMC2R_APP_MISC_1_MASK                      0x0300
-#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM                        0x0100
-#define U300_SYSCON_PMC2R_APP_MISC_1_MMC                       0x0200
-#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2                      0x0300
-#define U300_SYSCON_PMC2R_APP_MISC_2_MASK                      0x0C00
-#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM                        0x0400
-#define U300_SYSCON_PMC2R_APP_MISC_2_MMC                       0x0800
-#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2                      0x0C00
-#define U300_SYSCON_PMC2R_APP_MISC_3_MASK                      0x3000
-#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM                        0x1000
-#define U300_SYSCON_PMC2R_APP_MISC_3_MMC                       0x2000
-#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2                      0x3000
-#define U300_SYSCON_PMC2R_APP_MISC_4_MASK                      0xC000
-#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO                  0x0000
-#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM                        0x4000
-#define U300_SYSCON_PMC2R_APP_MISC_4_MMC                       0x8000
-#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO                  0xC000
-/* TODO: More SYSCON registers missing */
-#define U300_SYSCON_PMC3R                                      0x10C
-#define U300_SYSCON_PMC3R_APP_MISC_11_MASK                     0xC000
-#define U300_SYSCON_PMC3R_APP_MISC_11_SPI                      0x4000
-#define U300_SYSCON_PMC3R_APP_MISC_10_MASK                     0x3000
-#define U300_SYSCON_PMC3R_APP_MISC_10_SPI                      0x1000
-/* TODO: Missing other configs */
-#define U300_SYSCON_PMC4R                                      0x168
-#define U300_SYSCON_PMC4R_APP_MISC_12_MASK                     0x0003
-#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO                 0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_13_MASK                     0x000C
-#define U300_SYSCON_PMC4R_APP_MISC_13_CDI                      0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA                     0x0004
-#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2                    0x0008
-#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO                 0x000C
-#define U300_SYSCON_PMC4R_APP_MISC_14_MASK                     0x0030
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI                      0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA                     0x0010
-#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2                     0x0020
-#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO                 0x0030
-#define U300_SYSCON_PMC4R_APP_MISC_16_MASK                     0x0300
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13              0x0000
-#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS            0x0100
-#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N      0x0200
-
-#define DRIVER_NAME "pinmux-u300"
-
-/*
- * The DB3350 has 467 pads, I have enumerated the pads clockwise around the
- * edges of the silicon, finger by finger. LTCORNER upper left is pad 0.
- * Data taken from the PadRing chart, arranged like this:
- *
- *   0 ..... 104
- * 466        105
- *   .        .
- *   .        .
- * 358        224
- *  357 .... 225
- */
-#define U300_NUM_PADS 467
-
-/* Pad names for the pinmux subsystem */
-static const struct pinctrl_pin_desc u300_pads[] = {
-       /* Pads along the top edge of the chip */
-       PINCTRL_PIN(0, "P PAD VDD 28"),
-       PINCTRL_PIN(1, "P PAD GND 28"),
-       PINCTRL_PIN(2, "PO SIM RST N"),
-       PINCTRL_PIN(3, "VSSIO 25"),
-       PINCTRL_PIN(4, "VSSA ADDA ESDSUB"),
-       PINCTRL_PIN(5, "PWR VSSCOMMON"),
-       PINCTRL_PIN(6, "PI ADC I1 POS"),
-       PINCTRL_PIN(7, "PI ADC I1 NEG"),
-       PINCTRL_PIN(8, "PWR VSSAD0"),
-       PINCTRL_PIN(9, "PWR VCCAD0"),
-       PINCTRL_PIN(10, "PI ADC Q1 NEG"),
-       PINCTRL_PIN(11, "PI ADC Q1 POS"),
-       PINCTRL_PIN(12, "PWR VDDAD"),
-       PINCTRL_PIN(13, "PWR GNDAD"),
-       PINCTRL_PIN(14, "PI ADC I2 POS"),
-       PINCTRL_PIN(15, "PI ADC I2 NEG"),
-       PINCTRL_PIN(16, "PWR VSSAD1"),
-       PINCTRL_PIN(17, "PWR VCCAD1"),
-       PINCTRL_PIN(18, "PI ADC Q2 NEG"),
-       PINCTRL_PIN(19, "PI ADC Q2 POS"),
-       PINCTRL_PIN(20, "VSSA ADDA ESDSUB"),
-       PINCTRL_PIN(21, "PWR VCCGPAD"),
-       PINCTRL_PIN(22, "PI TX POW"),
-       PINCTRL_PIN(23, "PWR VSSGPAD"),
-       PINCTRL_PIN(24, "PO DAC I POS"),
-       PINCTRL_PIN(25, "PO DAC I NEG"),
-       PINCTRL_PIN(26, "PO DAC Q POS"),
-       PINCTRL_PIN(27, "PO DAC Q NEG"),
-       PINCTRL_PIN(28, "PWR VSSDA"),
-       PINCTRL_PIN(29, "PWR VCCDA"),
-       PINCTRL_PIN(30, "VSSA ADDA ESDSUB"),
-       PINCTRL_PIN(31, "P PAD VDDIO 11"),
-       PINCTRL_PIN(32, "PI PLL 26 FILTVDD"),
-       PINCTRL_PIN(33, "PI PLL 26 VCONT"),
-       PINCTRL_PIN(34, "PWR AGNDPLL2V5 32 13"),
-       PINCTRL_PIN(35, "PWR AVDDPLL2V5 32 13"),
-       PINCTRL_PIN(36, "VDDA PLL ESD"),
-       PINCTRL_PIN(37, "VSSA PLL ESD"),
-       PINCTRL_PIN(38, "VSS PLL"),
-       PINCTRL_PIN(39, "VDDC PLL"),
-       PINCTRL_PIN(40, "PWR AGNDPLL2V5 26 60"),
-       PINCTRL_PIN(41, "PWR AVDDPLL2V5 26 60"),
-       PINCTRL_PIN(42, "PWR AVDDPLL2V5 26 208"),
-       PINCTRL_PIN(43, "PWR AGNDPLL2V5 26 208"),
-       PINCTRL_PIN(44, "PWR AVDDPLL2V5 13 208"),
-       PINCTRL_PIN(45, "PWR AGNDPLL2V5 13 208"),
-       PINCTRL_PIN(46, "P PAD VSSIO 11"),
-       PINCTRL_PIN(47, "P PAD VSSIO 12"),
-       PINCTRL_PIN(48, "PI POW RST N"),
-       PINCTRL_PIN(49, "VDDC IO"),
-       PINCTRL_PIN(50, "P PAD VDDIO 16"),
-       PINCTRL_PIN(51, "PO RF WCDMA EN 4"),
-       PINCTRL_PIN(52, "PO RF WCDMA EN 3"),
-       PINCTRL_PIN(53, "PO RF WCDMA EN 2"),
-       PINCTRL_PIN(54, "PO RF WCDMA EN 1"),
-       PINCTRL_PIN(55, "PO RF WCDMA EN 0"),
-       PINCTRL_PIN(56, "PO GSM PA ENABLE"),
-       PINCTRL_PIN(57, "PO RF DATA STRB"),
-       PINCTRL_PIN(58, "PO RF DATA2"),
-       PINCTRL_PIN(59, "PIO RF DATA1"),
-       PINCTRL_PIN(60, "PIO RF DATA0"),
-       PINCTRL_PIN(61, "P PAD VDD 11"),
-       PINCTRL_PIN(62, "P PAD GND 11"),
-       PINCTRL_PIN(63, "P PAD VSSIO 16"),
-       PINCTRL_PIN(64, "P PAD VDDIO 18"),
-       PINCTRL_PIN(65, "PO RF CTRL STRB2"),
-       PINCTRL_PIN(66, "PO RF CTRL STRB1"),
-       PINCTRL_PIN(67, "PO RF CTRL STRB0"),
-       PINCTRL_PIN(68, "PIO RF CTRL DATA"),
-       PINCTRL_PIN(69, "PO RF CTRL CLK"),
-       PINCTRL_PIN(70, "PO TX ADC STRB"),
-       PINCTRL_PIN(71, "PO ANT SW 2"),
-       PINCTRL_PIN(72, "PO ANT SW 3"),
-       PINCTRL_PIN(73, "PO ANT SW 0"),
-       PINCTRL_PIN(74, "PO ANT SW 1"),
-       PINCTRL_PIN(75, "PO M CLKRQ"),
-       PINCTRL_PIN(76, "PI M CLK"),
-       PINCTRL_PIN(77, "PI RTC CLK"),
-       PINCTRL_PIN(78, "P PAD VDD 8"),
-       PINCTRL_PIN(79, "P PAD GND 8"),
-       PINCTRL_PIN(80, "P PAD VSSIO 13"),
-       PINCTRL_PIN(81, "P PAD VDDIO 13"),
-       PINCTRL_PIN(82, "PO SYS 1 CLK"),
-       PINCTRL_PIN(83, "PO SYS 2 CLK"),
-       PINCTRL_PIN(84, "PO SYS 0 CLK"),
-       PINCTRL_PIN(85, "PI SYS 0 CLKRQ"),
-       PINCTRL_PIN(86, "PO PWR MNGT CTRL 1"),
-       PINCTRL_PIN(87, "PO PWR MNGT CTRL 0"),
-       PINCTRL_PIN(88, "PO RESOUT2 RST N"),
-       PINCTRL_PIN(89, "PO RESOUT1 RST N"),
-       PINCTRL_PIN(90, "PO RESOUT0 RST N"),
-       PINCTRL_PIN(91, "PI SERVICE N"),
-       PINCTRL_PIN(92, "P PAD VDD 29"),
-       PINCTRL_PIN(93, "P PAD GND 29"),
-       PINCTRL_PIN(94, "P PAD VSSIO 8"),
-       PINCTRL_PIN(95, "P PAD VDDIO 8"),
-       PINCTRL_PIN(96, "PI EXT IRQ1 N"),
-       PINCTRL_PIN(97, "PI EXT IRQ0 N"),
-       PINCTRL_PIN(98, "PIO DC ON"),
-       PINCTRL_PIN(99, "PIO ACC APP I2C DATA"),
-       PINCTRL_PIN(100, "PIO ACC APP I2C CLK"),
-       PINCTRL_PIN(101, "P PAD VDD 12"),
-       PINCTRL_PIN(102, "P PAD GND 12"),
-       PINCTRL_PIN(103, "P PAD VSSIO 14"),
-       PINCTRL_PIN(104, "P PAD VDDIO 14"),
-       /* Pads along the right edge of the chip */
-       PINCTRL_PIN(105, "PIO APP I2C1 DATA"),
-       PINCTRL_PIN(106, "PIO APP I2C1 CLK"),
-       PINCTRL_PIN(107, "PO KEY OUT0"),
-       PINCTRL_PIN(108, "PO KEY OUT1"),
-       PINCTRL_PIN(109, "PO KEY OUT2"),
-       PINCTRL_PIN(110, "PO KEY OUT3"),
-       PINCTRL_PIN(111, "PO KEY OUT4"),
-       PINCTRL_PIN(112, "PI KEY IN0"),
-       PINCTRL_PIN(113, "PI KEY IN1"),
-       PINCTRL_PIN(114, "PI KEY IN2"),
-       PINCTRL_PIN(115, "P PAD VDDIO 15"),
-       PINCTRL_PIN(116, "P PAD VSSIO 15"),
-       PINCTRL_PIN(117, "P PAD GND 13"),
-       PINCTRL_PIN(118, "P PAD VDD 13"),
-       PINCTRL_PIN(119, "PI KEY IN3"),
-       PINCTRL_PIN(120, "PI KEY IN4"),
-       PINCTRL_PIN(121, "PI KEY IN5"),
-       PINCTRL_PIN(122, "PIO APP PCM I2S1 DATA B"),
-       PINCTRL_PIN(123, "PIO APP PCM I2S1 DATA A"),
-       PINCTRL_PIN(124, "PIO APP PCM I2S1 WS"),
-       PINCTRL_PIN(125, "PIO APP PCM I2S1 CLK"),
-       PINCTRL_PIN(126, "PIO APP PCM I2S0 DATA B"),
-       PINCTRL_PIN(127, "PIO APP PCM I2S0 DATA A"),
-       PINCTRL_PIN(128, "PIO APP PCM I2S0 WS"),
-       PINCTRL_PIN(129, "PIO APP PCM I2S0 CLK"),
-       PINCTRL_PIN(130, "P PAD VDD 17"),
-       PINCTRL_PIN(131, "P PAD GND 17"),
-       PINCTRL_PIN(132, "P PAD VSSIO 19"),
-       PINCTRL_PIN(133, "P PAD VDDIO 19"),
-       PINCTRL_PIN(134, "UART0 RTS"),
-       PINCTRL_PIN(135, "UART0 CTS"),
-       PINCTRL_PIN(136, "UART0 TX"),
-       PINCTRL_PIN(137, "UART0 RX"),
-       PINCTRL_PIN(138, "PIO ACC SPI DO"),
-       PINCTRL_PIN(139, "PIO ACC SPI DI"),
-       PINCTRL_PIN(140, "PIO ACC SPI CS0 N"),
-       PINCTRL_PIN(141, "PIO ACC SPI CS1 N"),
-       PINCTRL_PIN(142, "PIO ACC SPI CS2 N"),
-       PINCTRL_PIN(143, "PIO ACC SPI CLK"),
-       PINCTRL_PIN(144, "PO PDI EXT RST N"),
-       PINCTRL_PIN(145, "P PAD VDDIO 22"),
-       PINCTRL_PIN(146, "P PAD VSSIO 22"),
-       PINCTRL_PIN(147, "P PAD GND 18"),
-       PINCTRL_PIN(148, "P PAD VDD 18"),
-       PINCTRL_PIN(149, "PIO PDI C0"),
-       PINCTRL_PIN(150, "PIO PDI C1"),
-       PINCTRL_PIN(151, "PIO PDI C2"),
-       PINCTRL_PIN(152, "PIO PDI C3"),
-       PINCTRL_PIN(153, "PIO PDI C4"),
-       PINCTRL_PIN(154, "PIO PDI C5"),
-       PINCTRL_PIN(155, "PIO PDI D0"),
-       PINCTRL_PIN(156, "PIO PDI D1"),
-       PINCTRL_PIN(157, "PIO PDI D2"),
-       PINCTRL_PIN(158, "PIO PDI D3"),
-       PINCTRL_PIN(159, "P PAD VDDIO 21"),
-       PINCTRL_PIN(160, "P PAD VSSIO 21"),
-       PINCTRL_PIN(161, "PIO PDI D4"),
-       PINCTRL_PIN(162, "PIO PDI D5"),
-       PINCTRL_PIN(163, "PIO PDI D6"),
-       PINCTRL_PIN(164, "PIO PDI D7"),
-       PINCTRL_PIN(165, "PIO MS INS"),
-       PINCTRL_PIN(166, "MMC DATA DIR LS"),
-       PINCTRL_PIN(167, "MMC DATA 3"),
-       PINCTRL_PIN(168, "MMC DATA 2"),
-       PINCTRL_PIN(169, "MMC DATA 1"),
-       PINCTRL_PIN(170, "MMC DATA 0"),
-       PINCTRL_PIN(171, "MMC CMD DIR LS"),
-       PINCTRL_PIN(172, "P PAD VDD 27"),
-       PINCTRL_PIN(173, "P PAD GND 27"),
-       PINCTRL_PIN(174, "P PAD VSSIO 20"),
-       PINCTRL_PIN(175, "P PAD VDDIO 20"),
-       PINCTRL_PIN(176, "MMC CMD"),
-       PINCTRL_PIN(177, "MMC CLK"),
-       PINCTRL_PIN(178, "PIO APP GPIO 14"),
-       PINCTRL_PIN(179, "PIO APP GPIO 13"),
-       PINCTRL_PIN(180, "PIO APP GPIO 11"),
-       PINCTRL_PIN(181, "PIO APP GPIO 25"),
-       PINCTRL_PIN(182, "PIO APP GPIO 24"),
-       PINCTRL_PIN(183, "PIO APP GPIO 23"),
-       PINCTRL_PIN(184, "PIO APP GPIO 22"),
-       PINCTRL_PIN(185, "PIO APP GPIO 21"),
-       PINCTRL_PIN(186, "PIO APP GPIO 20"),
-       PINCTRL_PIN(187, "P PAD VDD 19"),
-       PINCTRL_PIN(188, "P PAD GND 19"),
-       PINCTRL_PIN(189, "P PAD VSSIO 23"),
-       PINCTRL_PIN(190, "P PAD VDDIO 23"),
-       PINCTRL_PIN(191, "PIO APP GPIO 19"),
-       PINCTRL_PIN(192, "PIO APP GPIO 18"),
-       PINCTRL_PIN(193, "PIO APP GPIO 17"),
-       PINCTRL_PIN(194, "PIO APP GPIO 16"),
-       PINCTRL_PIN(195, "PI CI D1"),
-       PINCTRL_PIN(196, "PI CI D0"),
-       PINCTRL_PIN(197, "PI CI HSYNC"),
-       PINCTRL_PIN(198, "PI CI VSYNC"),
-       PINCTRL_PIN(199, "PI CI EXT CLK"),
-       PINCTRL_PIN(200, "PO CI EXT RST N"),
-       PINCTRL_PIN(201, "P PAD VSSIO 43"),
-       PINCTRL_PIN(202, "P PAD VDDIO 43"),
-       PINCTRL_PIN(203, "PI CI D6"),
-       PINCTRL_PIN(204, "PI CI D7"),
-       PINCTRL_PIN(205, "PI CI D2"),
-       PINCTRL_PIN(206, "PI CI D3"),
-       PINCTRL_PIN(207, "PI CI D4"),
-       PINCTRL_PIN(208, "PI CI D5"),
-       PINCTRL_PIN(209, "PI CI D8"),
-       PINCTRL_PIN(210, "PI CI D9"),
-       PINCTRL_PIN(211, "P PAD VDD 20"),
-       PINCTRL_PIN(212, "P PAD GND 20"),
-       PINCTRL_PIN(213, "P PAD VSSIO 24"),
-       PINCTRL_PIN(214, "P PAD VDDIO 24"),
-       PINCTRL_PIN(215, "P PAD VDDIO 26"),
-       PINCTRL_PIN(216, "PO EMIF 1 A26"),
-       PINCTRL_PIN(217, "PO EMIF 1 A25"),
-       PINCTRL_PIN(218, "P PAD VSSIO 26"),
-       PINCTRL_PIN(219, "PO EMIF 1 A24"),
-       PINCTRL_PIN(220, "PO EMIF 1 A23"),
-       /* Pads along the bottom edge of the chip */
-       PINCTRL_PIN(221, "PO EMIF 1 A22"),
-       PINCTRL_PIN(222, "PO EMIF 1 A21"),
-       PINCTRL_PIN(223, "P PAD VDD 21"),
-       PINCTRL_PIN(224, "P PAD GND 21"),
-       PINCTRL_PIN(225, "P PAD VSSIO 27"),
-       PINCTRL_PIN(226, "P PAD VDDIO 27"),
-       PINCTRL_PIN(227, "PO EMIF 1 A20"),
-       PINCTRL_PIN(228, "PO EMIF 1 A19"),
-       PINCTRL_PIN(229, "PO EMIF 1 A18"),
-       PINCTRL_PIN(230, "PO EMIF 1 A17"),
-       PINCTRL_PIN(231, "P PAD VDDIO 28"),
-       PINCTRL_PIN(232, "P PAD VSSIO 28"),
-       PINCTRL_PIN(233, "PO EMIF 1 A16"),
-       PINCTRL_PIN(234, "PIO EMIF 1 D15"),
-       PINCTRL_PIN(235, "PO EMIF 1 A15"),
-       PINCTRL_PIN(236, "PIO EMIF 1 D14"),
-       PINCTRL_PIN(237, "P PAD VDD 22"),
-       PINCTRL_PIN(238, "P PAD GND 22"),
-       PINCTRL_PIN(239, "P PAD VSSIO 29"),
-       PINCTRL_PIN(240, "P PAD VDDIO 29"),
-       PINCTRL_PIN(241, "PO EMIF 1 A14"),
-       PINCTRL_PIN(242, "PIO EMIF 1 D13"),
-       PINCTRL_PIN(243, "PO EMIF 1 A13"),
-       PINCTRL_PIN(244, "PIO EMIF 1 D12"),
-       PINCTRL_PIN(245, "P PAD VSSIO 30"),
-       PINCTRL_PIN(246, "P PAD VDDIO 30"),
-       PINCTRL_PIN(247, "PO EMIF 1 A12"),
-       PINCTRL_PIN(248, "PIO EMIF 1 D11"),
-       PINCTRL_PIN(249, "PO EMIF 1 A11"),
-       PINCTRL_PIN(250, "PIO EMIF 1 D10"),
-       PINCTRL_PIN(251, "P PAD VSSIO 31"),
-       PINCTRL_PIN(252, "P PAD VDDIO 31"),
-       PINCTRL_PIN(253, "PO EMIF 1 A10"),
-       PINCTRL_PIN(254, "PIO EMIF 1 D09"),
-       PINCTRL_PIN(255, "PO EMIF 1 A09"),
-       PINCTRL_PIN(256, "P PAD VDDIO 32"),
-       PINCTRL_PIN(257, "P PAD VSSIO 32"),
-       PINCTRL_PIN(258, "P PAD GND 24"),
-       PINCTRL_PIN(259, "P PAD VDD 24"),
-       PINCTRL_PIN(260, "PIO EMIF 1 D08"),
-       PINCTRL_PIN(261, "PO EMIF 1 A08"),
-       PINCTRL_PIN(262, "PIO EMIF 1 D07"),
-       PINCTRL_PIN(263, "PO EMIF 1 A07"),
-       PINCTRL_PIN(264, "P PAD VDDIO 33"),
-       PINCTRL_PIN(265, "P PAD VSSIO 33"),
-       PINCTRL_PIN(266, "PIO EMIF 1 D06"),
-       PINCTRL_PIN(267, "PO EMIF 1 A06"),
-       PINCTRL_PIN(268, "PIO EMIF 1 D05"),
-       PINCTRL_PIN(269, "PO EMIF 1 A05"),
-       PINCTRL_PIN(270, "P PAD VDDIO 34"),
-       PINCTRL_PIN(271, "P PAD VSSIO 34"),
-       PINCTRL_PIN(272, "PIO EMIF 1 D04"),
-       PINCTRL_PIN(273, "PO EMIF 1 A04"),
-       PINCTRL_PIN(274, "PIO EMIF 1 D03"),
-       PINCTRL_PIN(275, "PO EMIF 1 A03"),
-       PINCTRL_PIN(276, "P PAD VDDIO 35"),
-       PINCTRL_PIN(277, "P PAD VSSIO 35"),
-       PINCTRL_PIN(278, "P PAD GND 23"),
-       PINCTRL_PIN(279, "P PAD VDD 23"),
-       PINCTRL_PIN(280, "PIO EMIF 1 D02"),
-       PINCTRL_PIN(281, "PO EMIF 1 A02"),
-       PINCTRL_PIN(282, "PIO EMIF 1 D01"),
-       PINCTRL_PIN(283, "PO EMIF 1 A01"),
-       PINCTRL_PIN(284, "P PAD VDDIO 36"),
-       PINCTRL_PIN(285, "P PAD VSSIO 36"),
-       PINCTRL_PIN(286, "PIO EMIF 1 D00"),
-       PINCTRL_PIN(287, "PO EMIF 1 BE1 N"),
-       PINCTRL_PIN(288, "PO EMIF 1 BE0 N"),
-       PINCTRL_PIN(289, "PO EMIF 1 ADV N"),
-       PINCTRL_PIN(290, "P PAD VDDIO 37"),
-       PINCTRL_PIN(291, "P PAD VSSIO 37"),
-       PINCTRL_PIN(292, "PO EMIF 1 SD CKE0"),
-       PINCTRL_PIN(293, "PO EMIF 1 OE N"),
-       PINCTRL_PIN(294, "PO EMIF 1 WE N"),
-       PINCTRL_PIN(295, "P PAD VDDIO 38"),
-       PINCTRL_PIN(296, "P PAD VSSIO 38"),
-       PINCTRL_PIN(297, "PO EMIF 1 CLK"),
-       PINCTRL_PIN(298, "PIO EMIF 1 SD CLK"),
-       PINCTRL_PIN(299, "P PAD VSSIO 45 (not bonded)"),
-       PINCTRL_PIN(300, "P PAD VDDIO 42"),
-       PINCTRL_PIN(301, "P PAD VSSIO 42"),
-       PINCTRL_PIN(302, "P PAD GND 31"),
-       PINCTRL_PIN(303, "P PAD VDD 31"),
-       PINCTRL_PIN(304, "PI EMIF 1 RET CLK"),
-       PINCTRL_PIN(305, "PI EMIF 1 WAIT N"),
-       PINCTRL_PIN(306, "PI EMIF 1 NFIF READY"),
-       PINCTRL_PIN(307, "PO EMIF 1 SD CKE1"),
-       PINCTRL_PIN(308, "PO EMIF 1 CS3 N"),
-       PINCTRL_PIN(309, "P PAD VDD 25"),
-       PINCTRL_PIN(310, "P PAD GND 25"),
-       PINCTRL_PIN(311, "P PAD VSSIO 39"),
-       PINCTRL_PIN(312, "P PAD VDDIO 39"),
-       PINCTRL_PIN(313, "PO EMIF 1 CS2 N"),
-       PINCTRL_PIN(314, "PO EMIF 1 CS1 N"),
-       PINCTRL_PIN(315, "PO EMIF 1 CS0 N"),
-       PINCTRL_PIN(316, "PO ETM TRACE PKT0"),
-       PINCTRL_PIN(317, "PO ETM TRACE PKT1"),
-       PINCTRL_PIN(318, "PO ETM TRACE PKT2"),
-       PINCTRL_PIN(319, "P PAD VDD 30"),
-       PINCTRL_PIN(320, "P PAD GND 30"),
-       PINCTRL_PIN(321, "P PAD VSSIO 44"),
-       PINCTRL_PIN(322, "P PAD VDDIO 44"),
-       PINCTRL_PIN(323, "PO ETM TRACE PKT3"),
-       PINCTRL_PIN(324, "PO ETM TRACE PKT4"),
-       PINCTRL_PIN(325, "PO ETM TRACE PKT5"),
-       PINCTRL_PIN(326, "PO ETM TRACE PKT6"),
-       PINCTRL_PIN(327, "PO ETM TRACE PKT7"),
-       PINCTRL_PIN(328, "PO ETM PIPE STAT0"),
-       PINCTRL_PIN(329, "P PAD VDD 26"),
-       PINCTRL_PIN(330, "P PAD GND 26"),
-       PINCTRL_PIN(331, "P PAD VSSIO 40"),
-       PINCTRL_PIN(332, "P PAD VDDIO 40"),
-       PINCTRL_PIN(333, "PO ETM PIPE STAT1"),
-       PINCTRL_PIN(334, "PO ETM PIPE STAT2"),
-       PINCTRL_PIN(335, "PO ETM TRACE CLK"),
-       PINCTRL_PIN(336, "PO ETM TRACE SYNC"),
-       PINCTRL_PIN(337, "PIO ACC GPIO 33"),
-       PINCTRL_PIN(338, "PIO ACC GPIO 32"),
-       PINCTRL_PIN(339, "PIO ACC GPIO 30"),
-       PINCTRL_PIN(340, "PIO ACC GPIO 29"),
-       PINCTRL_PIN(341, "P PAD VDDIO 17"),
-       PINCTRL_PIN(342, "P PAD VSSIO 17"),
-       PINCTRL_PIN(343, "P PAD GND 15"),
-       PINCTRL_PIN(344, "P PAD VDD 15"),
-       PINCTRL_PIN(345, "PIO ACC GPIO 28"),
-       PINCTRL_PIN(346, "PIO ACC GPIO 27"),
-       PINCTRL_PIN(347, "PIO ACC GPIO 16"),
-       PINCTRL_PIN(348, "PI TAP TMS"),
-       PINCTRL_PIN(349, "PI TAP TDI"),
-       PINCTRL_PIN(350, "PO TAP TDO"),
-       PINCTRL_PIN(351, "PI TAP RST N"),
-       /* Pads along the left edge of the chip */
-       PINCTRL_PIN(352, "PI EMU MODE 0"),
-       PINCTRL_PIN(353, "PO TAP RET CLK"),
-       PINCTRL_PIN(354, "PI TAP CLK"),
-       PINCTRL_PIN(355, "PO EMIF 0 SD CS N"),
-       PINCTRL_PIN(356, "PO EMIF 0 SD CAS N"),
-       PINCTRL_PIN(357, "PO EMIF 0 SD WE N"),
-       PINCTRL_PIN(358, "P PAD VDDIO 1"),
-       PINCTRL_PIN(359, "P PAD VSSIO 1"),
-       PINCTRL_PIN(360, "P PAD GND 1"),
-       PINCTRL_PIN(361, "P PAD VDD 1"),
-       PINCTRL_PIN(362, "PO EMIF 0 SD CKE"),
-       PINCTRL_PIN(363, "PO EMIF 0 SD DQML"),
-       PINCTRL_PIN(364, "PO EMIF 0 SD DQMU"),
-       PINCTRL_PIN(365, "PO EMIF 0 SD RAS N"),
-       PINCTRL_PIN(366, "PIO EMIF 0 D15"),
-       PINCTRL_PIN(367, "PO EMIF 0 A15"),
-       PINCTRL_PIN(368, "PIO EMIF 0 D14"),
-       PINCTRL_PIN(369, "PO EMIF 0 A14"),
-       PINCTRL_PIN(370, "PIO EMIF 0 D13"),
-       PINCTRL_PIN(371, "PO EMIF 0 A13"),
-       PINCTRL_PIN(372, "P PAD VDDIO 2"),
-       PINCTRL_PIN(373, "P PAD VSSIO 2"),
-       PINCTRL_PIN(374, "P PAD GND 2"),
-       PINCTRL_PIN(375, "P PAD VDD 2"),
-       PINCTRL_PIN(376, "PIO EMIF 0 D12"),
-       PINCTRL_PIN(377, "PO EMIF 0 A12"),
-       PINCTRL_PIN(378, "PIO EMIF 0 D11"),
-       PINCTRL_PIN(379, "PO EMIF 0 A11"),
-       PINCTRL_PIN(380, "PIO EMIF 0 D10"),
-       PINCTRL_PIN(381, "PO EMIF 0 A10"),
-       PINCTRL_PIN(382, "PIO EMIF 0 D09"),
-       PINCTRL_PIN(383, "PO EMIF 0 A09"),
-       PINCTRL_PIN(384, "PIO EMIF 0 D08"),
-       PINCTRL_PIN(385, "PO EMIF 0 A08"),
-       PINCTRL_PIN(386, "PIO EMIF 0 D07"),
-       PINCTRL_PIN(387, "PO EMIF 0 A07"),
-       PINCTRL_PIN(388, "P PAD VDDIO 3"),
-       PINCTRL_PIN(389, "P PAD VSSIO 3"),
-       PINCTRL_PIN(390, "P PAD GND 3"),
-       PINCTRL_PIN(391, "P PAD VDD 3"),
-       PINCTRL_PIN(392, "PO EFUSE RDOUT1"),
-       PINCTRL_PIN(393, "PIO EMIF 0 D06"),
-       PINCTRL_PIN(394, "PO EMIF 0 A06"),
-       PINCTRL_PIN(395, "PIO EMIF 0 D05"),
-       PINCTRL_PIN(396, "PO EMIF 0 A05"),
-       PINCTRL_PIN(397, "PIO EMIF 0 D04"),
-       PINCTRL_PIN(398, "PO EMIF 0 A04"),
-       PINCTRL_PIN(399, "A PADS/A VDDCO1v82v5 GND 80U SF LIN VDDCO AF"),
-       PINCTRL_PIN(400, "PWR VDDCO AF"),
-       PINCTRL_PIN(401, "PWR EFUSE HV1"),
-       PINCTRL_PIN(402, "P PAD VSSIO 4"),
-       PINCTRL_PIN(403, "P PAD VDDIO 4"),
-       PINCTRL_PIN(404, "P PAD GND 4"),
-       PINCTRL_PIN(405, "P PAD VDD 4"),
-       PINCTRL_PIN(406, "PIO EMIF 0 D03"),
-       PINCTRL_PIN(407, "PO EMIF 0 A03"),
-       PINCTRL_PIN(408, "PWR EFUSE HV2"),
-       PINCTRL_PIN(409, "PWR EFUSE HV3"),
-       PINCTRL_PIN(410, "PIO EMIF 0 D02"),
-       PINCTRL_PIN(411, "PO EMIF 0 A02"),
-       PINCTRL_PIN(412, "PIO EMIF 0 D01"),
-       PINCTRL_PIN(413, "P PAD VDDIO 5"),
-       PINCTRL_PIN(414, "P PAD VSSIO 5"),
-       PINCTRL_PIN(415, "P PAD GND 5"),
-       PINCTRL_PIN(416, "P PAD VDD 5"),
-       PINCTRL_PIN(417, "PO EMIF 0 A01"),
-       PINCTRL_PIN(418, "PIO EMIF 0 D00"),
-       PINCTRL_PIN(419, "IF 0 SD CLK"),
-       PINCTRL_PIN(420, "APP SPI CLK"),
-       PINCTRL_PIN(421, "APP SPI DO"),
-       PINCTRL_PIN(422, "APP SPI DI"),
-       PINCTRL_PIN(423, "APP SPI CS0"),
-       PINCTRL_PIN(424, "APP SPI CS1"),
-       PINCTRL_PIN(425, "APP SPI CS2"),
-       PINCTRL_PIN(426, "PIO APP GPIO 10"),
-       PINCTRL_PIN(427, "P PAD VDDIO 41"),
-       PINCTRL_PIN(428, "P PAD VSSIO 41"),
-       PINCTRL_PIN(429, "P PAD GND 6"),
-       PINCTRL_PIN(430, "P PAD VDD 6"),
-       PINCTRL_PIN(431, "PIO ACC SDIO0 CMD"),
-       PINCTRL_PIN(432, "PIO ACC SDIO0 CK"),
-       PINCTRL_PIN(433, "PIO ACC SDIO0 D3"),
-       PINCTRL_PIN(434, "PIO ACC SDIO0 D2"),
-       PINCTRL_PIN(435, "PIO ACC SDIO0 D1"),
-       PINCTRL_PIN(436, "PIO ACC SDIO0 D0"),
-       PINCTRL_PIN(437, "PIO USB PU"),
-       PINCTRL_PIN(438, "PIO USB SP"),
-       PINCTRL_PIN(439, "PIO USB DAT VP"),
-       PINCTRL_PIN(440, "PIO USB SE0 VM"),
-       PINCTRL_PIN(441, "PIO USB OE"),
-       PINCTRL_PIN(442, "PIO USB SUSP"),
-       PINCTRL_PIN(443, "P PAD VSSIO 6"),
-       PINCTRL_PIN(444, "P PAD VDDIO 6"),
-       PINCTRL_PIN(445, "PIO USB PUEN"),
-       PINCTRL_PIN(446, "PIO ACC UART0 RX"),
-       PINCTRL_PIN(447, "PIO ACC UART0 TX"),
-       PINCTRL_PIN(448, "PIO ACC UART0 CTS"),
-       PINCTRL_PIN(449, "PIO ACC UART0 RTS"),
-       PINCTRL_PIN(450, "PIO ACC UART3 RX"),
-       PINCTRL_PIN(451, "PIO ACC UART3 TX"),
-       PINCTRL_PIN(452, "PIO ACC UART3 CTS"),
-       PINCTRL_PIN(453, "PIO ACC UART3 RTS"),
-       PINCTRL_PIN(454, "PIO ACC IRDA TX"),
-       PINCTRL_PIN(455, "P PAD VDDIO 7"),
-       PINCTRL_PIN(456, "P PAD VSSIO 7"),
-       PINCTRL_PIN(457, "P PAD GND 7"),
-       PINCTRL_PIN(458, "P PAD VDD 7"),
-       PINCTRL_PIN(459, "PIO ACC IRDA RX"),
-       PINCTRL_PIN(460, "PIO ACC PCM I2S CLK"),
-       PINCTRL_PIN(461, "PIO ACC PCM I2S WS"),
-       PINCTRL_PIN(462, "PIO ACC PCM I2S DATA A"),
-       PINCTRL_PIN(463, "PIO ACC PCM I2S DATA B"),
-       PINCTRL_PIN(464, "PO SIM CLK"),
-       PINCTRL_PIN(465, "PIO ACC IRDA SD"),
-       PINCTRL_PIN(466, "PIO SIM DATA"),
-};
-
-/**
- * @dev: a pointer back to containing device
- * @virtbase: the offset to the controller in virtual memory
- */
-struct u300_pmx {
-       struct device *dev;
-       struct pinctrl_dev *pctl;
-       u32 phybase;
-       u32 physize;
-       void __iomem *virtbase;
-};
-
-/**
- * u300_pmx_registers - the array of registers read/written for each pinmux
- * shunt setting
- */
-const u32 u300_pmx_registers[] = {
-       U300_SYSCON_PMC1LR,
-       U300_SYSCON_PMC1HR,
-       U300_SYSCON_PMC2R,
-       U300_SYSCON_PMC3R,
-       U300_SYSCON_PMC4R,
-};
-
-/**
- * struct u300_pin_group - describes a U300 pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- *     from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- *     elements in .pins so we can iterate over that array
- */
-struct u300_pin_group {
-       const char *name;
-       const unsigned int *pins;
-       const unsigned num_pins;
-};
-
-/**
- * struct pmx_onmask - mask bits to enable/disable padmux
- * @mask: mask bits to disable
- * @val: mask bits to enable
- *
- * onmask lazy dog:
- * onmask = {
- *   {"PMC1LR" mask, "PMC1LR" value},
- *   {"PMC1HR" mask, "PMC1HR" value},
- *   {"PMC2R"  mask, "PMC2R"  value},
- *   {"PMC3R"  mask, "PMC3R"  value},
- *   {"PMC4R"  mask, "PMC4R"  value}
- * }
- */
-struct u300_pmx_mask {
-       u16 mask;
-       u16 bits;
-};
-
-/* The chip power pins are VDD, GND, VDDIO and VSSIO */
-static const unsigned power_pins[] = { 0, 1, 3, 31, 46, 47, 49, 50, 61, 62, 63,
-       64, 78, 79, 80, 81, 92, 93, 94, 95, 101, 102, 103, 104, 115, 116, 117,
-       118, 130, 131, 132, 133, 145, 146, 147, 148, 159, 160, 172, 173, 174,
-       175, 187, 188, 189, 190, 201, 202, 211, 212, 213, 214, 215, 218, 223,
-       224, 225, 226, 231, 232, 237, 238, 239, 240, 245, 246, 251, 252, 256,
-       257, 258, 259, 264, 265, 270, 271, 276, 277, 278, 279, 284, 285, 290,
-       291, 295, 296, 299, 300, 301, 302, 303, 309, 310, 311, 312, 319, 320,
-       321, 322, 329, 330, 331, 332, 341, 342, 343, 344, 358, 359, 360, 361,
-       372, 373, 374, 375, 388, 389, 390, 391, 402, 403, 404, 405, 413, 414,
-       415, 416, 427, 428, 429, 430, 443, 444, 455, 456, 457, 458 };
-static const unsigned emif0_pins[] = { 355, 356, 357, 362, 363, 364, 365, 366,
-       367, 368, 369, 370, 371, 376, 377, 378, 379, 380, 381, 382, 383, 384,
-       385, 386, 387, 393, 394, 395, 396, 397, 398, 406, 407, 410, 411, 412,
-       417, 418 };
-static const unsigned emif1_pins[] = { 216, 217, 219, 220, 221, 222, 227, 228,
-       229, 230, 233, 234, 235, 236, 241, 242, 243, 244, 247, 248, 249, 250,
-       253, 254, 255, 260, 261, 262, 263, 266, 267, 268, 269, 272, 273, 274,
-       275, 280, 281, 282, 283, 286, 287, 288, 289, 292, 293, 294, 297, 298,
-       304, 305, 306, 307, 308, 313, 314, 315 };
-static const unsigned uart0_pins[] = { 134, 135, 136, 137 };
-static const unsigned mmc0_pins[] = { 166, 167, 168, 169, 170, 171, 176, 177 };
-static const unsigned spi0_pins[] = { 420, 421, 422, 423, 424, 425 };
-
-static const struct u300_pmx_mask emif0_mask[] = {
-       {0, 0},
-       {0, 0},
-       {0, 0},
-       {0, 0},
-       {0, 0},
-};
-
-static const struct u300_pmx_mask emif1_mask[] = {
-       /*
-        * This connects the SDRAM to CS2 and a NAND flash to
-        * CS0 on the EMIF.
-        */
-       {
-               U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK |
-               U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK |
-               U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK |
-               U300_SYSCON_PMC1LR_EMIF_1_MASK,
-               U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM |
-               U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC |
-               U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF |
-               U300_SYSCON_PMC1LR_EMIF_1_SDRAM0
-       },
-       {0, 0},
-       {0, 0},
-       {0, 0},
-       {0, 0},
-};
-
-static const struct u300_pmx_mask uart0_mask[] = {
-       {0, 0},
-       {
-               U300_SYSCON_PMC1HR_APP_UART0_1_MASK |
-               U300_SYSCON_PMC1HR_APP_UART0_2_MASK,
-               U300_SYSCON_PMC1HR_APP_UART0_1_UART0 |
-               U300_SYSCON_PMC1HR_APP_UART0_2_UART0
-       },
-       {0, 0},
-       {0, 0},
-       {0, 0},
-};
-
-static const struct u300_pmx_mask mmc0_mask[] = {
-       { U300_SYSCON_PMC1LR_MMCSD_MASK, U300_SYSCON_PMC1LR_MMCSD_MMCSD},
-       {0, 0},
-       {0, 0},
-       {0, 0},
-       { U300_SYSCON_PMC4R_APP_MISC_12_MASK,
-         U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO }
-};
-
-static const struct u300_pmx_mask spi0_mask[] = {
-       {0, 0},
-       {
-               U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
-               U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
-               U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
-               U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
-               U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
-               U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI
-       },
-       {0, 0},
-       {0, 0},
-       {0, 0}
-};
-
-static const struct u300_pin_group u300_pin_groups[] = {
-       {
-               .name = "powergrp",
-               .pins = power_pins,
-               .num_pins = ARRAY_SIZE(power_pins),
-       },
-       {
-               .name = "emif0grp",
-               .pins = emif0_pins,
-               .num_pins = ARRAY_SIZE(emif0_pins),
-       },
-       {
-               .name = "emif1grp",
-               .pins = emif1_pins,
-               .num_pins = ARRAY_SIZE(emif1_pins),
-       },
-       {
-               .name = "uart0grp",
-               .pins = uart0_pins,
-               .num_pins = ARRAY_SIZE(uart0_pins),
-       },
-       {
-               .name = "mmc0grp",
-               .pins = mmc0_pins,
-               .num_pins = ARRAY_SIZE(mmc0_pins),
-       },
-       {
-               .name = "spi0grp",
-               .pins = spi0_pins,
-               .num_pins = ARRAY_SIZE(spi0_pins),
-       },
-};
-
-static int u300_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(u300_pin_groups))
-               return -EINVAL;
-       return 0;
-}
-
-static const char *u300_get_group_name(struct pinctrl_dev *pctldev,
-                                      unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(u300_pin_groups))
-               return NULL;
-       return u300_pin_groups[selector].name;
-}
-
-static int u300_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
-                              const unsigned **pins,
-                              unsigned *num_pins)
-{
-       if (selector >= ARRAY_SIZE(u300_pin_groups))
-               return -EINVAL;
-       *pins = u300_pin_groups[selector].pins;
-       *num_pins = u300_pin_groups[selector].num_pins;
-       return 0;
-}
-
-static void u300_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
-                  unsigned offset)
-{
-       seq_printf(s, " " DRIVER_NAME);
-}
-
-static struct pinctrl_ops u300_pctrl_ops = {
-       .list_groups = u300_list_groups,
-       .get_group_name = u300_get_group_name,
-       .get_group_pins = u300_get_group_pins,
-       .pin_dbg_show = u300_pin_dbg_show,
-};
-
-/*
- * Here we define the available functions and their corresponding pin groups
- */
-
-/**
- * struct u300_pmx_func - describes U300 pinmux functions
- * @name: the name of this specific function
- * @groups: corresponding pin groups
- * @onmask: bits to set to enable this when doing pin muxing
- */
-struct u300_pmx_func {
-       const char *name;
-       const char * const *groups;
-       const unsigned num_groups;
-       const struct u300_pmx_mask *mask;
-};
-
-static const char * const powergrps[] = { "powergrp" };
-static const char * const emif0grps[] = { "emif0grp" };
-static const char * const emif1grps[] = { "emif1grp" };
-static const char * const uart0grps[] = { "uart0grp" };
-static const char * const mmc0grps[] = { "mmc0grp" };
-static const char * const spi0grps[] = { "spi0grp" };
-
-static const struct u300_pmx_func u300_pmx_functions[] = {
-       {
-               .name = "power",
-               .groups = powergrps,
-               .num_groups = ARRAY_SIZE(powergrps),
-               /* Mask is N/A */
-       },
-       {
-               .name = "emif0",
-               .groups = emif0grps,
-               .num_groups = ARRAY_SIZE(emif0grps),
-               .mask = emif0_mask,
-       },
-       {
-               .name = "emif1",
-               .groups = emif1grps,
-               .num_groups = ARRAY_SIZE(emif1grps),
-               .mask = emif1_mask,
-       },
-       {
-               .name = "uart0",
-               .groups = uart0grps,
-               .num_groups = ARRAY_SIZE(uart0grps),
-               .mask = uart0_mask,
-       },
-       {
-               .name = "mmc0",
-               .groups = mmc0grps,
-               .num_groups = ARRAY_SIZE(mmc0grps),
-               .mask = mmc0_mask,
-       },
-       {
-               .name = "spi0",
-               .groups = spi0grps,
-               .num_groups = ARRAY_SIZE(spi0grps),
-               .mask = spi0_mask,
-       },
-};
-
-static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
-                              bool enable)
-{
-       u16 regval, val, mask;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
-               if (enable)
-                       val = u300_pmx_functions[selector].mask->bits;
-               else
-                       val = 0;
-
-               mask = u300_pmx_functions[selector].mask->mask;
-               if (mask != 0) {
-                       regval = readw(upmx->virtbase + u300_pmx_registers[i]);
-                       regval &= ~mask;
-                       regval |= val;
-                       writew(regval, upmx->virtbase + u300_pmx_registers[i]);
-               }
-       }
-}
-
-static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
-                          unsigned group)
-{
-       struct u300_pmx *upmx;
-
-       /* There is nothing to do with the power pins */
-       if (selector == 0)
-               return 0;
-
-       upmx = pinctrl_dev_get_drvdata(pctldev);
-       u300_pmx_endisable(upmx, selector, true);
-
-       return 0;
-}
-
-static void u300_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
-                            unsigned group)
-{
-       struct u300_pmx *upmx;
-
-       /* There is nothing to do with the power pins */
-       if (selector == 0)
-               return;
-
-       upmx = pinctrl_dev_get_drvdata(pctldev);
-       u300_pmx_endisable(upmx, selector, false);
-}
-
-static int u300_pmx_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
-{
-       if (selector >= ARRAY_SIZE(u300_pmx_functions))
-               return -EINVAL;
-       return 0;
-}
-
-static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev,
-                                         unsigned selector)
-{
-       return u300_pmx_functions[selector].name;
-}
-
-static int u300_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
-                              const char * const **groups,
-                              unsigned * const num_groups)
-{
-       *groups = u300_pmx_functions[selector].groups;
-       *num_groups = u300_pmx_functions[selector].num_groups;
-       return 0;
-}
-
-static struct pinmux_ops u300_pmx_ops = {
-       .list_functions = u300_pmx_list_funcs,
-       .get_function_name = u300_pmx_get_func_name,
-       .get_function_groups = u300_pmx_get_groups,
-       .enable = u300_pmx_enable,
-       .disable = u300_pmx_disable,
-};
-
-/*
- * FIXME: this will be set to sane values as this driver engulfs
- * drivers/gpio/gpio-u300.c and we really know this stuff.
- */
-static struct pinctrl_gpio_range u300_gpio_range = {
-       .name = "COH901*",
-       .id = 0,
-       .base = 0,
-       .npins = 64,
-};
-
-static struct pinctrl_desc u300_pmx_desc = {
-       .name = DRIVER_NAME,
-       .pins = u300_pads,
-       .npins = ARRAY_SIZE(u300_pads),
-       .maxpin = U300_NUM_PADS-1,
-       .pctlops = &u300_pctrl_ops,
-       .pmxops = &u300_pmx_ops,
-       .owner = THIS_MODULE,
-};
-
-static int __init u300_pmx_probe(struct platform_device *pdev)
-{
-       int ret;
-       struct u300_pmx *upmx;
-       struct resource *res;
-
-       /* Create state holders etc for this driver */
-       upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
-       if (!upmx)
-               return -ENOMEM;
-
-       upmx->dev = &pdev->dev;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENOENT;
-               goto out_no_resource;
-       }
-       upmx->phybase = res->start;
-       upmx->physize = resource_size(res);
-
-       if (request_mem_region(upmx->phybase, upmx->physize,
-                              DRIVER_NAME) == NULL) {
-               ret = -ENOMEM;
-               goto out_no_memregion;
-       }
-
-       upmx->virtbase = ioremap(upmx->phybase, upmx->physize);
-       if (!upmx->virtbase) {
-               ret = -ENOMEM;
-               goto out_no_remap;
-       }
-
-       upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
-       if (!upmx->pctl) {
-               dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
-               ret = -EINVAL;
-               goto out_no_pmx;
-       }
-
-       /* We will handle a range of GPIO pins */
-       pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_range);
-
-       platform_set_drvdata(pdev, upmx);
-
-       dev_info(&pdev->dev, "initialized U300 pinmux driver\n");
-
-       return 0;
-
-out_no_pmx:
-       iounmap(upmx->virtbase);
-out_no_remap:
-       platform_set_drvdata(pdev, NULL);
-out_no_memregion:
-       release_mem_region(upmx->phybase, upmx->physize);
-out_no_resource:
-       devm_kfree(&pdev->dev, upmx);
-       return ret;
-}
-
-static int __exit u300_pmx_remove(struct platform_device *pdev)
-{
-       struct u300_pmx *upmx = platform_get_drvdata(pdev);
-
-       pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_range);
-       pinctrl_unregister(upmx->pctl);
-       iounmap(upmx->virtbase);
-       release_mem_region(upmx->phybase, upmx->physize);
-       platform_set_drvdata(pdev, NULL);
-       devm_kfree(&pdev->dev, upmx);
-
-       return 0;
-}
-
-static struct platform_driver u300_pmx_driver = {
-       .driver = {
-               .name = DRIVER_NAME,
-               .owner = THIS_MODULE,
-       },
-       .remove = __exit_p(u300_pmx_remove),
-};
-
-static int __init u300_pmx_init(void)
-{
-       return platform_driver_probe(&u300_pmx_driver, u300_pmx_probe);
-}
-arch_initcall(u300_pmx_init);
-
-static void __exit u300_pmx_exit(void)
-{
-       platform_driver_unregister(&u300_pmx_driver);
-}
-module_exit(u300_pmx_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("U300 pin control driver");
-MODULE_LICENSE("GPL v2");
index a5467f8709e9a72087ab272e6206f7e8ae1625d0..a76a348321bb4284d6820279fed3bb679cd23fcf 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/string.h>
 #include <linux/sysfs.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 static DEFINE_MUTEX(pinmux_list_mutex);
 static LIST_HEAD(pinmux_list);
 
-/* List of pinmux hogs */
-static DEFINE_MUTEX(pinmux_hoglist_mutex);
-static LIST_HEAD(pinmux_hoglist);
-
-/* Global pinmux maps, we allow one set only */
-static struct pinmux_map const *pinmux_maps;
+/* Global pinmux maps */
+static struct pinmux_map *pinmux_maps;
 static unsigned pinmux_maps_num;
 
 /**
@@ -98,41 +95,35 @@ struct pinmux_hog {
  * @function: a functional name to give to this pin, passed to the driver
  *     so it knows what function to mux in, e.g. the string "gpioNN"
  *     means that you want to mux in the pin for use as GPIO number NN
- * @gpio: if this request concerns a single GPIO pin
  * @gpio_range: the range matching the GPIO pin if this is a request for a
  *     single GPIO pin
  */
 static int pin_request(struct pinctrl_dev *pctldev,
-                      int pin, const char *function, bool gpio,
+                      int pin, const char *function,
                       struct pinctrl_gpio_range *gpio_range)
 {
        struct pin_desc *desc;
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
        int status = -EINVAL;
 
-       dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
-
-       if (!pin_is_valid(pctldev, pin)) {
-               dev_err(&pctldev->dev, "pin is invalid\n");
-               return -EINVAL;
-       }
-
-       if (!function) {
-               dev_err(&pctldev->dev, "no function name given\n");
-               return -EINVAL;
-       }
+       dev_dbg(pctldev->dev, "request pin %d for %s\n", pin, function);
 
        desc = pin_desc_get(pctldev, pin);
        if (desc == NULL) {
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "pin is not registered so it cannot be requested\n");
                goto out;
        }
 
+       if (!function) {
+               dev_err(pctldev->dev, "no function name given\n");
+               return -EINVAL;
+       }
+
        spin_lock(&desc->lock);
        if (desc->mux_function) {
                spin_unlock(&desc->lock);
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "pin already requested\n");
                goto out;
        }
@@ -141,7 +132,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
 
        /* Let each pin increase references to this module */
        if (!try_module_get(pctldev->owner)) {
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "could not increase module refcount for pin %d\n",
                        pin);
                status = -EINVAL;
@@ -152,7 +143,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
         * If there is no kind of request function for the pin we just assume
         * we got it by default and proceed.
         */
-       if (gpio && ops->gpio_request_enable)
+       if (gpio_range && ops->gpio_request_enable)
                /* This requests and enables a single GPIO pin */
                status = ops->gpio_request_enable(pctldev, gpio_range, pin);
        else if (ops->request)
@@ -161,7 +152,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
                status = 0;
 
        if (status)
-               dev_err(&pctldev->dev, "->request on device %s failed "
+               dev_err(pctldev->dev, "->request on device %s failed "
                       "for pin %d\n",
                       pctldev->desc->name, pin);
 out_free_pin:
@@ -172,7 +163,7 @@ out_free_pin:
        }
 out:
        if (status)
-               dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
+               dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
                       pin, function ? : "?", status);
 
        return status;
@@ -182,34 +173,52 @@ out:
  * pin_free() - release a single muxed in pin so something else can be muxed
  * @pctldev: pin controller device handling this pin
  * @pin: the pin to free
- * @free_func: whether to free the pin's assigned function name string
+ * @gpio_range: the range matching the GPIO pin if this is a request for a
+ *     single GPIO pin
+ *
+ * This function returns a pointer to the function name in use. This is used
+ * for callers that dynamically allocate a function name so it can be freed
+ * once the pin is free. This is done for GPIO request functions.
  */
-static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
+static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+                           struct pinctrl_gpio_range *gpio_range)
 {
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
        struct pin_desc *desc;
+       const char *func;
 
        desc = pin_desc_get(pctldev, pin);
        if (desc == NULL) {
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "pin is not registered so it cannot be freed\n");
-               return;
+               return NULL;
        }
 
-       if (ops->free)
+       /*
+        * If there is no kind of request function for the pin we just assume
+        * we got it by default and proceed.
+        */
+       if (gpio_range && ops->gpio_disable_free)
+               ops->gpio_disable_free(pctldev, gpio_range, pin);
+       else if (ops->free)
                ops->free(pctldev, pin);
 
        spin_lock(&desc->lock);
-       if (free_func)
-               kfree(desc->mux_function);
+       func = desc->mux_function;
        desc->mux_function = NULL;
        spin_unlock(&desc->lock);
        module_put(pctldev->owner);
+
+       return func;
 }
 
 /**
  * pinmux_request_gpio() - request a single pin to be muxed in as GPIO
  * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_request() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed in.
  */
 int pinmux_request_gpio(unsigned gpio)
 {
@@ -225,7 +234,7 @@ int pinmux_request_gpio(unsigned gpio)
                return -EINVAL;
 
        /* Convert to the pin controllers number space */
-       pin = gpio - range->base;
+       pin = gpio - range->base + range->pin_base;
 
        /* Conjure some name stating what chip and pin this is taken by */
        snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
@@ -234,7 +243,7 @@ int pinmux_request_gpio(unsigned gpio)
        if (!function)
                return -EINVAL;
 
-       ret = pin_request(pctldev, pin, function, true, range);
+       ret = pin_request(pctldev, pin, function, range);
        if (ret < 0)
                kfree(function);
 
@@ -245,6 +254,10 @@ EXPORT_SYMBOL_GPL(pinmux_request_gpio);
 /**
  * pinmux_free_gpio() - free a single pin, currently used as GPIO
  * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_free() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
  */
 void pinmux_free_gpio(unsigned gpio)
 {
@@ -252,53 +265,108 @@ void pinmux_free_gpio(unsigned gpio)
        struct pinctrl_gpio_range *range;
        int ret;
        int pin;
+       const char *func;
 
        ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
        if (ret)
                return;
 
        /* Convert to the pin controllers number space */
-       pin = gpio - range->base;
+       pin = gpio - range->base + range->pin_base;
 
-       pin_free(pctldev, pin, true);
+       func = pin_free(pctldev, pin, range);
+       kfree(func);
 }
 EXPORT_SYMBOL_GPL(pinmux_free_gpio);
 
+static int pinmux_gpio_direction(unsigned gpio, bool input)
+{
+       struct pinctrl_dev *pctldev;
+       struct pinctrl_gpio_range *range;
+       const struct pinmux_ops *ops;
+       int ret;
+       int pin;
+
+       ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+       if (ret)
+               return ret;
+
+       ops = pctldev->desc->pmxops;
+
+       /* Convert to the pin controllers number space */
+       pin = gpio - range->base + range->pin_base;
+
+       if (ops->gpio_set_direction)
+               ret = ops->gpio_set_direction(pctldev, range, pin, input);
+       else
+               ret = 0;
+
+       return ret;
+}
+
+/**
+ * pinmux_gpio_direction_input() - request a GPIO pin to go into input mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_input() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_input(unsigned gpio)
+{
+       return pinmux_gpio_direction(gpio, true);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_input);
+
+/**
+ * pinmux_gpio_direction_output() - request a GPIO pin to go into output mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_output() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_output(unsigned gpio)
+{
+       return pinmux_gpio_direction(gpio, false);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_output);
+
 /**
  * pinmux_register_mappings() - register a set of pinmux mappings
- * @maps: the pinmux mappings table to register
+ * @maps: the pinmux mappings table to register, this should be marked with
+ *     __initdata so it can be discarded after boot, this function will
+ *     perform a shallow copy for the mapping entries.
  * @num_maps: the number of maps in the mapping table
  *
  * Only call this once during initialization of your machine, the function is
  * tagged as __init and won't be callable after init has completed. The map
  * passed into this function will be owned by the pinmux core and cannot be
- * free:d.
+ * freed.
  */
 int __init pinmux_register_mappings(struct pinmux_map const *maps,
                                    unsigned num_maps)
 {
+       void *tmp_maps;
        int i;
 
-       if (pinmux_maps != NULL) {
-               pr_err("pinmux mappings already registered, you can only "
-                      "register one set of maps\n");
-               return -EINVAL;
-       }
-
        pr_debug("add %d pinmux maps\n", num_maps);
+
+       /* First sanity check the new mapping */
        for (i = 0; i < num_maps; i++) {
-               /* Sanity check the mapping */
                if (!maps[i].name) {
                        pr_err("failed to register map %d: "
                               "no map name given\n", i);
                        return -EINVAL;
                }
+
                if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
                        pr_err("failed to register map %s (%d): "
                               "no pin control device given\n",
                               maps[i].name, i);
                        return -EINVAL;
                }
+
                if (!maps[i].function) {
                        pr_err("failed to register map %s (%d): "
                               "no function ID given\n", maps[i].name, i);
@@ -315,9 +383,30 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
                                 maps[i].function);
        }
 
-       pinmux_maps = maps;
-       pinmux_maps_num = num_maps;
+       /*
+        * Make a copy of the map array - string pointers will end up in the
+        * kernel const section anyway so these do not need to be deep copied.
+        */
+       if (!pinmux_maps_num) {
+               /* On first call, just copy them */
+               tmp_maps = kmemdup(maps,
+                                  sizeof(struct pinmux_map) * num_maps,
+                                  GFP_KERNEL);
+               if (!tmp_maps)
+                       return -ENOMEM;
+       } else {
+               /* Subsequent calls, reallocate array to new size */
+               size_t oldsize = sizeof(struct pinmux_map) * pinmux_maps_num;
+               size_t newsize = sizeof(struct pinmux_map) * num_maps;
+
+               tmp_maps = krealloc(pinmux_maps, oldsize + newsize, GFP_KERNEL);
+               if (!tmp_maps)
+                       return -ENOMEM;
+               memcpy((tmp_maps + oldsize), maps, newsize);
+       }
 
+       pinmux_maps = tmp_maps;
+       pinmux_maps_num += num_maps;
        return 0;
 }
 
@@ -345,14 +434,14 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
        if (ret)
                return ret;
 
-       dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
+       dev_dbg(pctldev->dev, "requesting the %u pins from group %u\n",
                num_pins, group_selector);
 
        /* Try to allocate all pins in this group, one by one */
        for (i = 0; i < num_pins; i++) {
-               ret = pin_request(pctldev, pins[i], func, false, NULL);
+               ret = pin_request(pctldev, pins[i], func, NULL);
                if (ret) {
-                       dev_err(&pctldev->dev,
+                       dev_err(pctldev->dev,
                                "could not get pin %d for function %s "
                                "on device %s - conflicting mux mappings?\n",
                                pins[i], func ? : "(undefined)",
@@ -360,7 +449,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
                        /* On error release all taken pins */
                        i--; /* this pin just failed */
                        for (; i >= 0; i--)
-                               pin_free(pctldev, pins[i], false);
+                               pin_free(pctldev, pins[i], NULL);
                        return -ENODEV;
                }
        }
@@ -384,44 +473,13 @@ static void release_pins(struct pinctrl_dev *pctldev,
        ret = pctlops->get_group_pins(pctldev, group_selector,
                                      &pins, &num_pins);
        if (ret) {
-               dev_err(&pctldev->dev, "could not get pins to release for "
+               dev_err(pctldev->dev, "could not get pins to release for "
                        "group selector %d\n",
                        group_selector);
                return;
        }
        for (i = 0; i < num_pins; i++)
-               pin_free(pctldev, pins[i], false);
-}
-
-/**
- * pinmux_get_group_selector() - returns the group selector for a group
- * @pctldev: the pin controller handling the group
- * @pin_group: the pin group to look up
- */
-static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
-                                    const char *pin_group)
-{
-       const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-       unsigned group_selector = 0;
-
-       while (pctlops->list_groups(pctldev, group_selector) >= 0) {
-               const char *gname = pctlops->get_group_name(pctldev,
-                                                           group_selector);
-               if (!strcmp(gname, pin_group)) {
-                       dev_dbg(&pctldev->dev,
-                               "found group selector %u for %s\n",
-                               group_selector,
-                               pin_group);
-                       return group_selector;
-               }
-
-               group_selector++;
-       }
-
-       dev_err(&pctldev->dev, "does not have pin group %s\n",
-               pin_group);
-
-       return -EINVAL;
+               pin_free(pctldev, pins[i], NULL);
 }
 
 /**
@@ -465,9 +523,9 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
                        return ret;
                if (num_groups < 1)
                        return -EINVAL;
-               ret = pinmux_get_group_selector(pctldev, groups[0]);
+               ret = pinctrl_get_group_selector(pctldev, groups[0]);
                if (ret < 0) {
-                       dev_err(&pctldev->dev,
+                       dev_err(pctldev->dev,
                                "function %s wants group %s but the pin "
                                "controller does not seem to have that group\n",
                                pmxops->get_function_name(pctldev, func_selector),
@@ -476,7 +534,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
                }
 
                if (num_groups > 1)
-                       dev_dbg(&pctldev->dev,
+                       dev_dbg(pctldev->dev,
                                "function %s support more than one group, "
                                "default-selecting first group %s (%d)\n",
                                pmxops->get_function_name(pctldev, func_selector),
@@ -486,13 +544,13 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
                return ret;
        }
 
-       dev_dbg(&pctldev->dev,
+       dev_dbg(pctldev->dev,
                "check if we have pin group %s on controller %s\n",
                pin_group, pinctrl_dev_get_name(pctldev));
 
-       ret = pinmux_get_group_selector(pctldev, pin_group);
+       ret = pinctrl_get_group_selector(pctldev, pin_group);
        if (ret < 0) {
-               dev_dbg(&pctldev->dev,
+               dev_dbg(pctldev->dev,
                        "%s does not support pin group %s with function %s\n",
                        pinctrl_dev_get_name(pctldev),
                        pin_group,
@@ -569,7 +627,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
         */
 
        if (pmx->pctldev && pmx->pctldev != pctldev) {
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "different pin control devices given for device %s, "
                        "function %s\n",
                        devname,
@@ -592,7 +650,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
         */
        if (pmx->func_selector != UINT_MAX &&
            pmx->func_selector != func_selector) {
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "dual function defines in the map for device %s\n",
                       devname);
                return -EINVAL;
@@ -698,7 +756,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
                }
 
                pr_debug("in map, found pctldev %s to handle function %s",
-                        dev_name(&pctldev->dev), map->function);
+                        dev_name(pctldev->dev), map->function);
 
 
                /*
@@ -874,7 +932,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
                 * without any problems, so then we can hog pinmuxes for
                 * all devices that just want a static pin mux at this point.
                 */
-               dev_err(&pctldev->dev, "map %s wants to hog a non-system "
+               dev_err(pctldev->dev, "map %s wants to hog a non-system "
                        "pinmux, this is not going to work\n", map->name);
                return -EINVAL;
        }
@@ -886,7 +944,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
        pmx = pinmux_get(NULL, map->name);
        if (IS_ERR(pmx)) {
                kfree(hog);
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "could not get the %s pinmux mapping for hogging\n",
                        map->name);
                return PTR_ERR(pmx);
@@ -896,7 +954,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
        if (ret) {
                pinmux_put(pmx);
                kfree(hog);
-               dev_err(&pctldev->dev,
+               dev_err(pctldev->dev,
                        "could not enable the %s pinmux mapping for hogging\n",
                        map->name);
                return ret;
@@ -905,7 +963,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
        hog->map = map;
        hog->pmx = pmx;
 
-       dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
+       dev_info(pctldev->dev, "hogged map %s, function %s\n", map->name,
                 map->function);
        mutex_lock(&pctldev->pinmux_hogs_lock);
        list_add(&hog->node, &pctldev->pinmux_hogs);
@@ -924,7 +982,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
  */
 int pinmux_hog_maps(struct pinctrl_dev *pctldev)
 {
-       struct device *dev = &pctldev->dev;
+       struct device *dev = pctldev->dev;
        const char *devname = dev_name(dev);
        int ret;
        int i;
@@ -948,7 +1006,7 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
 }
 
 /**
- * pinmux_hog_maps() - unhog specific map entries on controller device
+ * pinmux_unhog_maps() - unhog specific map entries on controller device
  * @pctldev: the pin control device to unhog entries on
  */
 void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
@@ -1005,18 +1063,19 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
 static int pinmux_pins_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
-       unsigned pin;
+       unsigned i, pin;
 
        seq_puts(s, "Pinmux settings per pin\n");
        seq_puts(s, "Format: pin (name): pinmuxfunction\n");
 
-       /* The highest pin number need to be included in the loop, thus <= */
-       for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+       /* The pin number can be retrived from the pin controller descriptor */
+       for (i = 0; i < pctldev->desc->npins; i++) {
 
                struct pin_desc *desc;
 
+               pin = pctldev->desc->pins[i].number;
                desc = pin_desc_get(pctldev, pin);
-               /* Pin space may be sparse */
+               /* Skip if we cannot search the pin */
                if (desc == NULL)
                        continue;
 
index ca0d608f8248c4b29dc599f2c7048f9f45d8c21a..df33530cec4a458d0cf51be33ce60d81943116c3 100644 (file)
@@ -427,7 +427,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
 
        /* replace driver_data with info */
        info->regulator = regulator_register(&info->desc, &pdev->dev,
-                                            pdata, info);
+                                            pdata, info, NULL);
        if (IS_ERR(info->regulator)) {
                dev_err(&pdev->dev, "failed to register regulator %s\n",
                        info->desc.name);
index 9713b1b860cb70a88f9fc51805f087a2919f0ef2..7a61b17ddd04700325ea69b1b801d13408a95aa6 100644 (file)
@@ -93,6 +93,7 @@ config REGULATOR_MAX1586
 config REGULATOR_MAX8649
        tristate "Maxim 8649 voltage regulator"
        depends on I2C
+       select REGMAP_I2C
        help
          This driver controls a Maxim 8649 voltage output regulator via
          I2C bus.
@@ -177,6 +178,13 @@ config REGULATOR_DA903X
          Say y here to support the BUCKs and LDOs regulators found on
          Dialog Semiconductor DA9030/DA9034 PMIC.
 
+config REGULATOR_DA9052
+       tristate "Dialog DA9052/DA9053 regulators"
+       depends on PMIC_DA9052
+       help
+         This driver supports the voltage regulators of DA9052-BC and
+         DA9053-AA/Bx PMIC.
+
 config REGULATOR_PCF50633
        tristate "PCF50633 regulator driver"
         depends on MFD_PCF50633
index 93a6318f5328eabaa73af83c877aee2a3edac876..503bac87715ede60b0d0f5ae8124df6059c824fc 100644 (file)
@@ -4,6 +4,7 @@
 
 
 obj-$(CONFIG_REGULATOR) += core.o dummy.o
+obj-$(CONFIG_OF) += of_regulator.o
 obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
 obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
 obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
 obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
index 298c6c6a279574561eb24f329b115fe134d274a1..685ad43b074973f40e579fe4896ad5e0e595c041 100644 (file)
@@ -63,7 +63,7 @@ static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
        struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
 
        return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
-                       (selector << ri->voltage_shift) & ri->voltage_mask);
+                              selector << ri->voltage_shift);
 }
 
 static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
@@ -188,7 +188,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
        ri->pdev = pdev;
 
        rdev = regulator_register(&ri->desc, &pdev->dev,
-                                 pdev->dev.platform_data, ri);
+                                 pdev->dev.platform_data, ri, NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "Failed to register regulator %s\n",
                        ri->desc.name);
index 585e4946fe0a7157b6ccaf6ae59465f788505a38..042271aace6a65b8a2f40cdc3be75934ae081ff4 100644 (file)
@@ -634,7 +634,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
                rdev = regulator_register(&ab3100_regulator_desc[i],
                                          &pdev->dev,
                                          &plfdata->reg_constraints[i],
-                                         reg);
+                                         reg, NULL);
 
                if (IS_ERR(rdev)) {
                        err = PTR_ERR(rdev);
index 6e1ae69646b396778660a8d3cf665f40e0d8e07e..e91b8ddc2793120b9e4e3697aa6912ab4358d2e2 100644 (file)
@@ -822,7 +822,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 
                /* register regulator with framework */
                info->regulator = regulator_register(&info->desc, &pdev->dev,
-                               &pdata->regulator[i], info);
+                               &pdata->regulator[i], info, NULL);
                if (IS_ERR(info->regulator)) {
                        err = PTR_ERR(info->regulator);
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
index a4be41614eebd41fa5733c09a7e9645c64c72c59..483c80930852f08de0a4130968afbce5c87f9a39 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit ad5398_probe(struct i2c_client *client,
        chip->current_mask = (chip->current_level - 1) << chip->current_offset;
 
        chip->rdev = regulator_register(&ad5398_reg, &client->dev,
-                                       init_data, chip);
+                                       init_data, chip, NULL);
        if (IS_ERR(chip->rdev)) {
                ret = PTR_ERR(chip->rdev);
                dev_err(&client->dev, "failed to register %s %s\n",
index e24d1b7d97a84c4f5ded6aaf672ba3f317be838c..9fab6d1bbe80d28a1f388e8bc2bdb14589c8702e 100644 (file)
@@ -107,7 +107,7 @@ static int __init bq24022_probe(struct platform_device *pdev)
        ret = gpio_direction_output(pdata->gpio_nce, 1);
 
        bq24022 = regulator_register(&bq24022_desc, &pdev->dev,
-                                    pdata->init_data, pdata);
+                                    pdata->init_data, pdata, NULL);
        if (IS_ERR(bq24022)) {
                dev_dbg(&pdev->dev, "couldn't register regulator\n");
                ret = PTR_ERR(bq24022);
index 938398f3e869c57814fc130b9133ac435118214e..ca86f39a0fdc824fba463f0eb30157ce933bbe9c 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/mutex.h>
 #include <linux/suspend.h>
 #include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
@@ -132,6 +134,33 @@ static struct regulator *get_device_regulator(struct device *dev)
        return NULL;
 }
 
+/**
+ * of_get_regulator - get a regulator device node based on supply name
+ * @dev: Device pointer for the consumer (of regulator) device
+ * @supply: regulator supply name
+ *
+ * Extract the regulator device node corresponding to the supply name.
+ * retruns the device node corresponding to the regulator if found, else
+ * returns NULL.
+ */
+static struct device_node *of_get_regulator(struct device *dev, const char *supply)
+{
+       struct device_node *regnode = NULL;
+       char prop_name[32]; /* 32 is max size of property name */
+
+       dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
+
+       snprintf(prop_name, 32, "%s-supply", supply);
+       regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+       if (!regnode) {
+               dev_warn(dev, "%s property in node %s references invalid phandle",
+                               prop_name, dev->of_node->full_name);
+               return NULL;
+       }
+       return regnode;
+}
+
 /* Platform voltage constraint check */
 static int regulator_check_voltage(struct regulator_dev *rdev,
                                   int *min_uV, int *max_uV)
@@ -883,8 +912,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
        int ret = 0;
        struct regulator_ops *ops = rdev->desc->ops;
 
-       rdev->constraints = kmemdup(constraints, sizeof(*constraints),
-                                   GFP_KERNEL);
+       if (constraints)
+               rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+                                           GFP_KERNEL);
+       else
+               rdev->constraints = kzalloc(sizeof(*constraints),
+                                           GFP_KERNEL);
        if (!rdev->constraints)
                return -ENOMEM;
 
@@ -893,7 +926,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                goto out;
 
        /* do we need to setup our suspend state */
-       if (constraints->initial_state) {
+       if (rdev->constraints->initial_state) {
                ret = suspend_prepare(rdev, rdev->constraints->initial_state);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set suspend state\n");
@@ -901,7 +934,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                }
        }
 
-       if (constraints->initial_mode) {
+       if (rdev->constraints->initial_mode) {
                if (!ops->set_mode) {
                        rdev_err(rdev, "no set_mode operation\n");
                        ret = -EINVAL;
@@ -952,9 +985,8 @@ static int set_supply(struct regulator_dev *rdev,
        rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
 
        rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
-       if (IS_ERR(rdev->supply)) {
-               err = PTR_ERR(rdev->supply);
-               rdev->supply = NULL;
+       if (rdev->supply == NULL) {
+               err = -ENOMEM;
                return err;
        }
 
@@ -1148,6 +1180,30 @@ static int _regulator_get_enable_time(struct regulator_dev *rdev)
        return rdev->desc->ops->enable_time(rdev);
 }
 
+static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+                                                        const char *supply)
+{
+       struct regulator_dev *r;
+       struct device_node *node;
+
+       /* first do a dt based lookup */
+       if (dev && dev->of_node) {
+               node = of_get_regulator(dev, supply);
+               if (node)
+                       list_for_each_entry(r, &regulator_list, list)
+                               if (r->dev.parent &&
+                                       node == r->dev.of_node)
+                                       return r;
+       }
+
+       /* if not found, try doing it non-dt way */
+       list_for_each_entry(r, &regulator_list, list)
+               if (strcmp(rdev_get_name(r), supply) == 0)
+                       return r;
+
+       return NULL;
+}
+
 /* Internal regulator request function */
 static struct regulator *_regulator_get(struct device *dev, const char *id,
                                        int exclusive)
@@ -1168,6 +1224,10 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
 
        mutex_lock(&regulator_list_mutex);
 
+       rdev = regulator_dev_lookup(dev, id);
+       if (rdev)
+               goto found;
+
        list_for_each_entry(map, &regulator_map_list, list) {
                /* If the mapping has a device set up it must match */
                if (map->dev_name &&
@@ -1221,6 +1281,7 @@ found:
        if (regulator == NULL) {
                regulator = ERR_PTR(-ENOMEM);
                module_put(rdev->owner);
+               goto out;
        }
 
        rdev->open_count++;
@@ -1726,6 +1787,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
 
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                                     int min_uV, int max_uV)
@@ -2428,6 +2490,43 @@ err:
 }
 EXPORT_SYMBOL_GPL(regulator_bulk_disable);
 
+/**
+ * regulator_bulk_force_disable - force disable multiple regulator consumers
+ *
+ * @num_consumers: Number of consumers
+ * @consumers:     Consumer data; clients are stored here.
+ * @return         0 on success, an errno on failure
+ *
+ * This convenience API allows consumers to forcibly disable multiple regulator
+ * clients in a single API call.
+ * NOTE: This should be used for situations when device damage will
+ * likely occur if the regulators are not disabled (e.g. over temp).
+ * Although regulator_force_disable function call for some consumers can
+ * return error numbers, the function is called for all consumers.
+ */
+int regulator_bulk_force_disable(int num_consumers,
+                          struct regulator_bulk_data *consumers)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < num_consumers; i++)
+               consumers[i].ret =
+                           regulator_force_disable(consumers[i].consumer);
+
+       for (i = 0; i < num_consumers; i++) {
+               if (consumers[i].ret != 0) {
+                       ret = consumers[i].ret;
+                       goto out;
+               }
+       }
+
+       return 0;
+out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
+
 /**
  * regulator_bulk_free - free multiple regulator consumers
  *
@@ -2503,7 +2602,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
        int                     status = 0;
 
        /* some attributes need specific methods to be displayed */
-       if (ops->get_voltage || ops->get_voltage_sel) {
+       if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+           (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
                status = device_create_file(dev, &dev_attr_microvolts);
                if (status < 0)
                        return status;
@@ -2637,11 +2737,13 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  */
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        struct device *dev, const struct regulator_init_data *init_data,
-       void *driver_data)
+       void *driver_data, struct device_node *of_node)
 {
+       const struct regulation_constraints *constraints = NULL;
        static atomic_t regulator_no = ATOMIC_INIT(0);
        struct regulator_dev *rdev;
        int ret, i;
+       const char *supply = NULL;
 
        if (regulator_desc == NULL)
                return ERR_PTR(-EINVAL);
@@ -2653,9 +2755,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
            regulator_desc->type != REGULATOR_CURRENT)
                return ERR_PTR(-EINVAL);
 
-       if (!init_data)
-               return ERR_PTR(-EINVAL);
-
        /* Only one of each should be implemented */
        WARN_ON(regulator_desc->ops->get_voltage &&
                regulator_desc->ops->get_voltage_sel);
@@ -2688,7 +2787,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
 
        /* preform any regulator specific init */
-       if (init_data->regulator_init) {
+       if (init_data && init_data->regulator_init) {
                ret = init_data->regulator_init(rdev->reg_data);
                if (ret < 0)
                        goto clean;
@@ -2696,6 +2795,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
 
        /* register with sysfs */
        rdev->dev.class = &regulator_class;
+       rdev->dev.of_node = of_node;
        rdev->dev.parent = dev;
        dev_set_name(&rdev->dev, "regulator.%d",
                     atomic_inc_return(&regulator_no) - 1);
@@ -2708,7 +2808,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        dev_set_drvdata(&rdev->dev, rdev);
 
        /* set regulator constraints */
-       ret = set_machine_constraints(rdev, &init_data->constraints);
+       if (init_data)
+               constraints = &init_data->constraints;
+
+       ret = set_machine_constraints(rdev, constraints);
        if (ret < 0)
                goto scrub;
 
@@ -2717,21 +2820,18 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        if (ret < 0)
                goto scrub;
 
-       if (init_data->supply_regulator) {
+       if (init_data && init_data->supply_regulator)
+               supply = init_data->supply_regulator;
+       else if (regulator_desc->supply_name)
+               supply = regulator_desc->supply_name;
+
+       if (supply) {
                struct regulator_dev *r;
-               int found = 0;
 
-               list_for_each_entry(r, &regulator_list, list) {
-                       if (strcmp(rdev_get_name(r),
-                                  init_data->supply_regulator) == 0) {
-                               found = 1;
-                               break;
-                       }
-               }
+               r = regulator_dev_lookup(dev, supply);
 
-               if (!found) {
-                       dev_err(dev, "Failed to find supply %s\n",
-                               init_data->supply_regulator);
+               if (!r) {
+                       dev_err(dev, "Failed to find supply %s\n", supply);
                        ret = -ENODEV;
                        goto scrub;
                }
@@ -2739,18 +2839,28 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
                ret = set_supply(rdev, r);
                if (ret < 0)
                        goto scrub;
+
+               /* Enable supply if rail is enabled */
+               if (rdev->desc->ops->is_enabled &&
+                               rdev->desc->ops->is_enabled(rdev)) {
+                       ret = regulator_enable(rdev->supply);
+                       if (ret < 0)
+                               goto scrub;
+               }
        }
 
        /* add consumers devices */
-       for (i = 0; i < init_data->num_consumer_supplies; i++) {
-               ret = set_consumer_device_supply(rdev,
-                       init_data->consumer_supplies[i].dev,
-                       init_data->consumer_supplies[i].dev_name,
-                       init_data->consumer_supplies[i].supply);
-               if (ret < 0) {
-                       dev_err(dev, "Failed to set supply %s\n",
+       if (init_data) {
+               for (i = 0; i < init_data->num_consumer_supplies; i++) {
+                       ret = set_consumer_device_supply(rdev,
+                               init_data->consumer_supplies[i].dev,
+                               init_data->consumer_supplies[i].dev_name,
                                init_data->consumer_supplies[i].supply);
-                       goto unset_supplies;
+                       if (ret < 0) {
+                               dev_err(dev, "Failed to set supply %s\n",
+                                       init_data->consumer_supplies[i].supply);
+                               goto unset_supplies;
+                       }
                }
        }
 
index e23ddfa8b2c6d2f2241e6813e2d6ff660dfeff02..8dbc54da7d7078004ff61eddd1d1f82a71f0f257 100644 (file)
@@ -537,7 +537,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
                ri->desc.ops = &da9030_regulator_ldo1_15_ops;
 
        rdev = regulator_register(&ri->desc, &pdev->dev,
-                                 pdev->dev.platform_data, ri);
+                                 pdev->dev.platform_data, ri, NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "failed to register regulator %s\n",
                                ri->desc.name);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
new file mode 100644 (file)
index 0000000..3767364
--- /dev/null
@@ -0,0 +1,606 @@
+/*
+* da9052-regulator.c: Regulator driver for DA9052
+*
+* Copyright(c) 2011 Dialog Semiconductor Ltd.
+*
+* Author: David Dajun Chen <dchen@diasemi.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+
+/* Buck step size */
+#define DA9052_BUCK_PERI_3uV_STEP              100000
+#define DA9052_BUCK_PERI_REG_MAP_UPTO_3uV      24
+#define DA9052_CONST_3uV                       3000000
+
+#define DA9052_MIN_UA          0
+#define DA9052_MAX_UA          3
+#define DA9052_CURRENT_RANGE   4
+
+/* Bit masks */
+#define DA9052_BUCK_ILIM_MASK_EVEN     0x0c
+#define DA9052_BUCK_ILIM_MASK_ODD      0xc0
+
+static const u32 da9052_current_limits[3][4] = {
+       {700000, 800000, 1000000, 1200000},     /* DA9052-BC BUCKs */
+       {1600000, 2000000, 2400000, 3000000},   /* DA9053-AA/Bx BUCK-CORE */
+       {800000, 1000000, 1200000, 1500000},    /* DA9053-AA/Bx BUCK-PRO,
+                                                * BUCK-MEM and BUCK-PERI
+                                               */
+};
+
+struct da9052_regulator_info {
+       struct regulator_desc reg_desc;
+       int step_uV;
+       int min_uV;
+       int max_uV;
+       unsigned char volt_shift;
+       unsigned char en_bit;
+       unsigned char activate_bit;
+};
+
+struct da9052_regulator {
+       struct da9052 *da9052;
+       struct da9052_regulator_info *info;
+       struct regulator_dev *rdev;
+};
+
+static int verify_range(struct da9052_regulator_info *info,
+                        int min_uV, int max_uV)
+{
+       if (min_uV > info->max_uV || max_uV < info->min_uV)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int da9052_regulator_enable(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+
+       return da9052_reg_update(regulator->da9052,
+                                DA9052_BUCKCORE_REG + offset,
+                                1 << info->en_bit, 1 << info->en_bit);
+}
+
+static int da9052_regulator_disable(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+
+       return da9052_reg_update(regulator->da9052,
+                                DA9052_BUCKCORE_REG + offset,
+                                1 << info->en_bit, 0);
+}
+
+static int da9052_regulator_is_enabled(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+       int ret;
+
+       ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+       if (ret < 0)
+               return ret;
+
+       return ret & (1 << info->en_bit);
+}
+
+static int da9052_dcdc_get_current_limit(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       int offset = rdev_get_id(rdev);
+       int ret, row = 2;
+
+       ret = da9052_reg_read(regulator->da9052, DA9052_BUCKA_REG + offset/2);
+       if (ret < 0)
+               return ret;
+
+       /* Determine the even or odd position of the buck current limit
+        * register field
+       */
+       if (offset % 2 == 0)
+               ret = (ret & DA9052_BUCK_ILIM_MASK_EVEN) >> 2;
+       else
+               ret = (ret & DA9052_BUCK_ILIM_MASK_ODD) >> 6;
+
+       /* Select the appropriate current limit range */
+       if (regulator->da9052->chip_id == DA9052)
+               row = 0;
+       else if (offset == 0)
+               row = 1;
+
+       return da9052_current_limits[row][ret];
+}
+
+static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                         int max_uA)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       int offset = rdev_get_id(rdev);
+       int reg_val = 0;
+       int i, row = 2;
+
+       /* Select the appropriate current limit range */
+       if (regulator->da9052->chip_id == DA9052)
+               row = 0;
+       else if (offset == 0)
+               row = 1;
+
+       if (min_uA > da9052_current_limits[row][DA9052_MAX_UA] ||
+           max_uA < da9052_current_limits[row][DA9052_MIN_UA])
+               return -EINVAL;
+
+       for (i = 0; i < DA9052_CURRENT_RANGE; i++) {
+               if (min_uA <= da9052_current_limits[row][i]) {
+                       reg_val = i;
+                       break;
+               }
+       }
+
+       /* Determine the even or odd position of the buck current limit
+        * register field
+       */
+       if (offset % 2 == 0)
+               return da9052_reg_update(regulator->da9052,
+                                        DA9052_BUCKA_REG + offset/2,
+                                        DA9052_BUCK_ILIM_MASK_EVEN,
+                                        reg_val << 2);
+       else
+               return da9052_reg_update(regulator->da9052,
+                                        DA9052_BUCKA_REG + offset/2,
+                                        DA9052_BUCK_ILIM_MASK_ODD,
+                                        reg_val << 6);
+}
+
+static int da9052_list_buckperi_voltage(struct regulator_dev *rdev,
+                                        unsigned int selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int volt_uV;
+
+       if ((regulator->da9052->chip_id == DA9052) &&
+           (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
+               volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
+                           + info->min_uV);
+               volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
+                           * (DA9052_BUCK_PERI_3uV_STEP);
+       } else
+                       volt_uV = (selector * info->step_uV) + info->min_uV;
+
+       if (volt_uV > info->max_uV)
+               return -EINVAL;
+
+       return volt_uV;
+}
+
+static int da9052_list_voltage(struct regulator_dev *rdev,
+                               unsigned int selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int volt_uV;
+
+       volt_uV = info->min_uV + info->step_uV * selector;
+
+       if (volt_uV > info->max_uV)
+               return -EINVAL;
+
+       return volt_uV;
+}
+
+static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
+                                            int min_uV, int max_uV,
+                                            unsigned int *selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+       int ret;
+
+       ret = verify_range(info, min_uV, max_uV);
+       if (ret < 0)
+               return ret;
+
+       if (min_uV < info->min_uV)
+               min_uV = info->min_uV;
+
+       *selector = (min_uV - info->min_uV) / info->step_uV;
+
+       ret = da9052_list_voltage(rdev, *selector);
+       if (ret < 0)
+               return ret;
+
+       return da9052_reg_update(regulator->da9052,
+                                DA9052_BUCKCORE_REG + offset,
+                                (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_set_ldo_voltage(struct regulator_dev *rdev,
+                                  int min_uV, int max_uV,
+                                  unsigned int *selector)
+{
+       return da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+}
+
+static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
+                                     int min_uV, int max_uV,
+                                     unsigned int *selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int ret;
+
+       ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+       if (ret < 0)
+               return ret;
+
+       /* Some LDOs are DVC controlled which requires enabling of
+        * the LDO activate bit to implement the changes on the
+        * LDO output.
+        */
+       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+                                info->activate_bit);
+}
+
+static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
+                                   int min_uV, int max_uV,
+                                   unsigned int *selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int ret;
+
+       ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+       if (ret < 0)
+               return ret;
+
+       /* Some DCDCs are DVC controlled which requires enabling of
+        * the DCDC activate bit to implement the changes on the
+        * DCDC output.
+        */
+       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+                                info->activate_bit);
+}
+
+static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+       int ret;
+
+       ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+       if (ret < 0)
+               return ret;
+
+       ret &= ((1 << info->volt_shift) - 1);
+
+       return ret;
+}
+
+static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
+                                       int max_uV, unsigned int *selector)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+       int ret;
+
+       ret = verify_range(info, min_uV, max_uV);
+       if (ret < 0)
+               return ret;
+
+       if (min_uV < info->min_uV)
+               min_uV = info->min_uV;
+
+       if ((regulator->da9052->chip_id == DA9052) &&
+           (min_uV >= DA9052_CONST_3uV))
+               *selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
+                           ((min_uV - DA9052_CONST_3uV) /
+                           (DA9052_BUCK_PERI_3uV_STEP));
+       else
+               *selector = (min_uV - info->min_uV) / info->step_uV;
+
+       ret = da9052_list_buckperi_voltage(rdev, *selector);
+       if (ret < 0)
+               return ret;
+
+       return da9052_reg_update(regulator->da9052,
+                                DA9052_BUCKCORE_REG + offset,
+                                (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_get_buckperi_voltage_sel(struct regulator_dev *rdev)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int offset = rdev_get_id(rdev);
+       int ret;
+
+       ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+       if (ret < 0)
+               return ret;
+
+       ret &= ((1 << info->volt_shift) - 1);
+
+       return ret;
+}
+
+static struct regulator_ops da9052_buckperi_ops = {
+       .list_voltage = da9052_list_buckperi_voltage,
+       .get_voltage_sel = da9052_get_buckperi_voltage_sel,
+       .set_voltage = da9052_set_buckperi_voltage,
+
+       .get_current_limit = da9052_dcdc_get_current_limit,
+       .set_current_limit = da9052_dcdc_set_current_limit,
+
+       .is_enabled = da9052_regulator_is_enabled,
+       .enable = da9052_regulator_enable,
+       .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_dcdc_ops = {
+       .set_voltage = da9052_set_dcdc_voltage,
+       .get_current_limit = da9052_dcdc_get_current_limit,
+       .set_current_limit = da9052_dcdc_set_current_limit,
+
+       .list_voltage = da9052_list_voltage,
+       .get_voltage_sel = da9052_get_regulator_voltage_sel,
+       .is_enabled = da9052_regulator_is_enabled,
+       .enable = da9052_regulator_enable,
+       .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo5_6_ops = {
+       .set_voltage = da9052_set_ldo5_6_voltage,
+
+       .list_voltage = da9052_list_voltage,
+       .get_voltage_sel = da9052_get_regulator_voltage_sel,
+       .is_enabled = da9052_regulator_is_enabled,
+       .enable = da9052_regulator_enable,
+       .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo_ops = {
+       .set_voltage = da9052_set_ldo_voltage,
+
+       .list_voltage = da9052_list_voltage,
+       .get_voltage_sel = da9052_get_regulator_voltage_sel,
+       .is_enabled = da9052_regulator_is_enabled,
+       .enable = da9052_regulator_enable,
+       .disable = da9052_regulator_disable,
+};
+
+#define DA9052_LDO5_6(_id, step, min, max, sbits, ebits, abits) \
+{\
+       .reg_desc = {\
+               .name = "LDO" #_id,\
+               .ops = &da9052_ldo5_6_ops,\
+               .type = REGULATOR_VOLTAGE,\
+               .id = _id,\
+               .owner = THIS_MODULE,\
+       },\
+       .min_uV = (min) * 1000,\
+       .max_uV = (max) * 1000,\
+       .step_uV = (step) * 1000,\
+       .volt_shift = (sbits),\
+       .en_bit = (ebits),\
+       .activate_bit = (abits),\
+}
+
+#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+{\
+       .reg_desc = {\
+               .name = "LDO" #_id,\
+               .ops = &da9052_ldo_ops,\
+               .type = REGULATOR_VOLTAGE,\
+               .id = _id,\
+               .owner = THIS_MODULE,\
+       },\
+       .min_uV = (min) * 1000,\
+       .max_uV = (max) * 1000,\
+       .step_uV = (step) * 1000,\
+       .volt_shift = (sbits),\
+       .en_bit = (ebits),\
+       .activate_bit = (abits),\
+}
+
+#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+{\
+       .reg_desc = {\
+               .name = "BUCK" #_id,\
+               .ops = &da9052_dcdc_ops,\
+               .type = REGULATOR_VOLTAGE,\
+               .id = _id,\
+               .owner = THIS_MODULE,\
+       },\
+       .min_uV = (min) * 1000,\
+       .max_uV = (max) * 1000,\
+       .step_uV = (step) * 1000,\
+       .volt_shift = (sbits),\
+       .en_bit = (ebits),\
+       .activate_bit = (abits),\
+}
+
+#define DA9052_BUCKPERI(_id, step, min, max, sbits, ebits, abits) \
+{\
+       .reg_desc = {\
+               .name = "BUCK" #_id,\
+               .ops = &da9052_buckperi_ops,\
+               .type = REGULATOR_VOLTAGE,\
+               .id = _id,\
+               .owner = THIS_MODULE,\
+       },\
+       .min_uV = (min) * 1000,\
+       .max_uV = (max) * 1000,\
+       .step_uV = (step) * 1000,\
+       .volt_shift = (sbits),\
+       .en_bit = (ebits),\
+       .activate_bit = (abits),\
+}
+
+static struct da9052_regulator_info da9052_regulator_info[] = {
+       /* Buck1 - 4 */
+       DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+       DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+       DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+       DA9052_BUCKPERI(3, 50, 1800, 3600, 5, 6, 0),
+       /* LDO1 - LDO10 */
+       DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+       DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+       DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+       DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+       DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+       DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static struct da9052_regulator_info da9053_regulator_info[] = {
+       /* Buck1 - 4 */
+       DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+       DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+       DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+       DA9052_BUCKPERI(3, 25, 925, 2500, 6, 6, 0),
+       /* LDO1 - LDO10 */
+       DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+       DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+       DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+       DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+       DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+       DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+       DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+                                                                int id)
+{
+       struct da9052_regulator_info *info;
+       int i;
+
+       switch (chip_id) {
+       case DA9052:
+               for (i = 0; i < ARRAY_SIZE(da9052_regulator_info); i++) {
+                       info = &da9052_regulator_info[i];
+                       if (info->reg_desc.id == id)
+                               return info;
+               }
+               break;
+       case DA9053_AA:
+       case DA9053_BA:
+       case DA9053_BB:
+               for (i = 0; i < ARRAY_SIZE(da9053_regulator_info); i++) {
+                       info = &da9053_regulator_info[i];
+                       if (info->reg_desc.id == id)
+                               return info;
+               }
+               break;
+       }
+
+       return NULL;
+}
+
+static int __devinit da9052_regulator_probe(struct platform_device *pdev)
+{
+       struct da9052_regulator *regulator;
+       struct da9052 *da9052;
+       struct da9052_pdata *pdata;
+       int ret;
+
+       regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9052_regulator),
+                                GFP_KERNEL);
+       if (!regulator)
+               return -ENOMEM;
+
+       da9052 = dev_get_drvdata(pdev->dev.parent);
+       pdata = da9052->dev->platform_data;
+       regulator->da9052 = da9052;
+
+       regulator->info = find_regulator_info(regulator->da9052->chip_id,
+                                             pdev->id);
+       if (regulator->info == NULL) {
+               dev_err(&pdev->dev, "invalid regulator ID specified\n");
+               ret = -EINVAL;
+               goto err;
+       }
+       regulator->rdev = regulator_register(&regulator->info->reg_desc,
+                                            &pdev->dev,
+                                            pdata->regulators[pdev->id],
+                                            regulator, NULL);
+       if (IS_ERR(regulator->rdev)) {
+               dev_err(&pdev->dev, "failed to register regulator %s\n",
+                       regulator->info->reg_desc.name);
+               ret = PTR_ERR(regulator->rdev);
+               goto err;
+       }
+
+       platform_set_drvdata(pdev, regulator);
+
+       return 0;
+err:
+       devm_kfree(&pdev->dev, regulator);
+       return ret;
+}
+
+static int __devexit da9052_regulator_remove(struct platform_device *pdev)
+{
+       struct da9052_regulator *regulator = platform_get_drvdata(pdev);
+
+       regulator_unregister(regulator->rdev);
+       devm_kfree(&pdev->dev, regulator);
+
+       return 0;
+}
+
+static struct platform_driver da9052_regulator_driver = {
+       .probe = da9052_regulator_probe,
+       .remove = __devexit_p(da9052_regulator_remove),
+       .driver = {
+               .name = "da9052-regulator",
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init da9052_regulator_init(void)
+{
+       return platform_driver_register(&da9052_regulator_driver);
+}
+subsys_initcall(da9052_regulator_init);
+
+static void __exit da9052_regulator_exit(void)
+{
+       platform_driver_unregister(&da9052_regulator_driver);
+}
+module_exit(da9052_regulator_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Power Regulator driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-regulator");
index 78329751af542dc36d80fc58d0c085ab24b99401..515443fcd26b09ecd6616b6f2084069c3b407912 100644 (file)
@@ -486,7 +486,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
 
                /* register with the regulator framework */
                info->rdev = regulator_register(&info->desc, &pdev->dev,
-                               init_data, info);
+                               init_data, info, NULL);
                if (IS_ERR(info->rdev)) {
                        err = PTR_ERR(info->rdev);
                        dev_err(&pdev->dev, "failed to register %s: err %i\n",
index b8f520513ce74b47beac3ed234fda7ad17cbb50c..0ee00de4be72867e38febb48ae35d4f8f115bc4f 100644 (file)
@@ -42,7 +42,7 @@ static int __devinit dummy_regulator_probe(struct platform_device *pdev)
        int ret;
 
        dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
-                                                 &dummy_initdata, NULL);
+                                                 &dummy_initdata, NULL, NULL);
        if (IS_ERR(dummy_regulator_rdev)) {
                ret = PTR_ERR(dummy_regulator_rdev);
                pr_err("Failed to register regulator: %d\n", ret);
index 21ecf212a5227643abcad804c855f699be32f338..e24e3a174c4b343b0dfbf32a52d69dceee60fb10 100644 (file)
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
@@ -38,6 +42,58 @@ struct fixed_voltage_data {
        bool is_enabled;
 };
 
+
+/**
+ * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
+ * @dev: device requesting for fixed_voltage_config
+ *
+ * Populates fixed_voltage_config structure by extracting data from device
+ * tree node, returns a pointer to the populated structure or NULL if memory
+ * alloc fails.
+ */
+static struct fixed_voltage_config *
+of_get_fixed_voltage_config(struct device *dev)
+{
+       struct fixed_voltage_config *config;
+       struct device_node *np = dev->of_node;
+       const __be32 *delay;
+       struct regulator_init_data *init_data;
+
+       config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
+                                                                GFP_KERNEL);
+       if (!config)
+               return NULL;
+
+       config->init_data = of_get_regulator_init_data(dev, dev->of_node);
+       if (!config->init_data)
+               return NULL;
+
+       init_data = config->init_data;
+       init_data->constraints.apply_uV = 0;
+
+       config->supply_name = init_data->constraints.name;
+       if (init_data->constraints.min_uV == init_data->constraints.max_uV) {
+               config->microvolts = init_data->constraints.min_uV;
+       } else {
+               dev_err(dev,
+                        "Fixed regulator specified with variable voltages\n");
+               return NULL;
+       }
+
+       if (init_data->constraints.boot_on)
+               config->enabled_at_boot = true;
+
+       config->gpio = of_get_named_gpio(np, "gpio", 0);
+       delay = of_get_property(np, "startup-delay-us", NULL);
+       if (delay)
+               config->startup_delay = be32_to_cpu(*delay);
+
+       if (of_find_property(np, "enable-active-high", NULL))
+               config->enable_high = true;
+
+       return config;
+}
+
 static int fixed_voltage_is_enabled(struct regulator_dev *dev)
 {
        struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -80,7 +136,10 @@ static int fixed_voltage_get_voltage(struct regulator_dev *dev)
 {
        struct fixed_voltage_data *data = rdev_get_drvdata(dev);
 
-       return data->microvolts;
+       if (data->microvolts)
+               return data->microvolts;
+       else
+               return -EINVAL;
 }
 
 static int fixed_voltage_list_voltage(struct regulator_dev *dev,
@@ -105,10 +164,18 @@ static struct regulator_ops fixed_voltage_ops = {
 
 static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
 {
-       struct fixed_voltage_config *config = pdev->dev.platform_data;
+       struct fixed_voltage_config *config;
        struct fixed_voltage_data *drvdata;
        int ret;
 
+       if (pdev->dev.of_node)
+               config = of_get_fixed_voltage_config(&pdev->dev);
+       else
+               config = pdev->dev.platform_data;
+
+       if (!config)
+               return -ENOMEM;
+
        drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
        if (drvdata == NULL) {
                dev_err(&pdev->dev, "Failed to allocate device data\n");
@@ -180,7 +247,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
        }
 
        drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
-                                         config->init_data, drvdata);
+                                         config->init_data, drvdata,
+                                         pdev->dev.of_node);
        if (IS_ERR(drvdata->dev)) {
                ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -217,12 +285,23 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
        return 0;
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id fixed_of_match[] __devinitconst = {
+       { .compatible = "regulator-fixed", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, fixed_of_match);
+#else
+#define fixed_of_match NULL
+#endif
+
 static struct platform_driver regulator_fixed_voltage_driver = {
        .probe          = reg_fixed_voltage_probe,
        .remove         = __devexit_p(reg_fixed_voltage_remove),
        .driver         = {
                .name           = "reg-fixed-voltage",
                .owner          = THIS_MODULE,
+               .of_match_table = fixed_of_match,
        },
 };
 
index f0acf52498bd4e312aee97fda6711ec0e498291e..42e1cb1835e54e9534ced30aa7aaf49f2897bf38 100644 (file)
@@ -284,7 +284,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
        drvdata->state = state;
 
        drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
-                                         config->init_data, drvdata);
+                                         config->init_data, drvdata, NULL);
        if (IS_ERR(drvdata->dev)) {
                ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
index e4b3592e81767856cdadd4543a6e25bb001d695d..c1a456c4257c1b5f3b23a553ee87a1a67f939a00 100644 (file)
@@ -170,7 +170,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
 
        for (i = 0; i < 3; i++) {
                pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
-                                               init_data, pmic);
+                                               init_data, pmic, NULL);
                if (IS_ERR(pmic->rdev[i])) {
                        dev_err(&i2c->dev, "failed to register %s\n", id->name);
                        err = PTR_ERR(pmic->rdev[i]);
index 72b16b5f3db6fca2f7a2ded2ce55344256ba327f..0cfabd318a59faa015b21226ebe9417f91ac7f12 100644 (file)
@@ -451,7 +451,7 @@ static int __devinit setup_regulators(struct lp3971 *lp3971,
        for (i = 0; i < pdata->num_regulators; i++) {
                struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
                lp3971->rdev[i] = regulator_register(&regulators[reg->id],
-                                       lp3971->dev, reg->initdata, lp3971);
+                               lp3971->dev, reg->initdata, lp3971, NULL);
 
                if (IS_ERR(lp3971->rdev[i])) {
                        err = PTR_ERR(lp3971->rdev[i]);
index fbc5e3741befffc8dbd097e0653881ff3c149459..49a15eefe5fe25390bdaf52cd0455675565cb516 100644 (file)
@@ -555,7 +555,7 @@ static int __devinit setup_regulators(struct lp3972 *lp3972,
        for (i = 0; i < pdata->num_regulators; i++) {
                struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
                lp3972->rdev[i] = regulator_register(&regulators[reg->id],
-                                       lp3972->dev, reg->initdata, lp3972);
+                               lp3972->dev, reg->initdata, lp3972, NULL);
 
                if (IS_ERR(lp3972->rdev[i])) {
                        err = PTR_ERR(lp3972->rdev[i]);
index 3f49512c513488583f1a58103f6df508614635f9..40e7a4db28534268a746dcf54db3564c8b9ab098 100644 (file)
@@ -214,7 +214,7 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
                }
                rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
                                             pdata->subdevs[i].platform_data,
-                                            max1586);
+                                            max1586, NULL);
                if (IS_ERR(rdev[i])) {
                        ret = PTR_ERR(rdev[i]);
                        dev_err(&client->dev, "failed to register %s\n",
index 1062cf9f02dc3303a7fae53a23203dd21a7707a8..b06a2399587c0e08fec38c8c827da6859130840f 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/regulator/driver.h>
 #include <linux/slab.h>
 #include <linux/regulator/max8649.h>
+#include <linux/regmap.h>
 
 #define MAX8649_DCDC_VMIN      750000          /* uV */
 #define MAX8649_DCDC_VMAX      1380000         /* uV */
@@ -49,9 +50,8 @@
 
 struct max8649_regulator_info {
        struct regulator_dev    *regulator;
-       struct i2c_client       *i2c;
        struct device           *dev;
-       struct mutex            io_lock;
+       struct regmap           *regmap;
 
        int             vol_reg;
        unsigned        mode:2; /* bit[1:0] = VID1, VID0 */
@@ -63,71 +63,6 @@ struct max8649_regulator_info {
 
 /* I2C operations */
 
-static inline int max8649_read_device(struct i2c_client *i2c,
-                                     int reg, int bytes, void *dest)
-{
-       unsigned char data;
-       int ret;
-
-       data = (unsigned char)reg;
-       ret = i2c_master_send(i2c, &data, 1);
-       if (ret < 0)
-               return ret;
-       ret = i2c_master_recv(i2c, dest, bytes);
-       if (ret < 0)
-               return ret;
-       return 0;
-}
-
-static inline int max8649_write_device(struct i2c_client *i2c,
-                                      int reg, int bytes, void *src)
-{
-       unsigned char buf[bytes + 1];
-       int ret;
-
-       buf[0] = (unsigned char)reg;
-       memcpy(&buf[1], src, bytes);
-
-       ret = i2c_master_send(i2c, buf, bytes + 1);
-       if (ret < 0)
-               return ret;
-       return 0;
-}
-
-static int max8649_reg_read(struct i2c_client *i2c, int reg)
-{
-       struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
-       unsigned char data;
-       int ret;
-
-       mutex_lock(&info->io_lock);
-       ret = max8649_read_device(i2c, reg, 1, &data);
-       mutex_unlock(&info->io_lock);
-
-       if (ret < 0)
-               return ret;
-       return (int)data;
-}
-
-static int max8649_set_bits(struct i2c_client *i2c, int reg,
-                           unsigned char mask, unsigned char data)
-{
-       struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
-       unsigned char value;
-       int ret;
-
-       mutex_lock(&info->io_lock);
-       ret = max8649_read_device(i2c, reg, 1, &value);
-       if (ret < 0)
-               goto out;
-       value &= ~mask;
-       value |= data;
-       ret = max8649_write_device(i2c, reg, 1, &value);
-out:
-       mutex_unlock(&info->io_lock);
-       return ret;
-}
-
 static inline int check_range(int min_uV, int max_uV)
 {
        if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
@@ -144,13 +79,14 @@ static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
 static int max8649_get_voltage(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+       unsigned int val;
        unsigned char data;
        int ret;
 
-       ret = max8649_reg_read(info->i2c, info->vol_reg);
-       if (ret < 0)
+       ret = regmap_read(info->regmap, info->vol_reg, &val);
+       if (ret != 0)
                return ret;
-       data = (unsigned char)ret & MAX8649_VOL_MASK;
+       data = (unsigned char)val & MAX8649_VOL_MASK;
        return max8649_list_voltage(rdev, data);
 }
 
@@ -170,14 +106,14 @@ static int max8649_set_voltage(struct regulator_dev *rdev,
        mask = MAX8649_VOL_MASK;
        *selector = data & mask;
 
-       return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+       return regmap_update_bits(info->regmap, info->vol_reg, mask, data);
 }
 
 /* EN_PD means pulldown on EN input */
 static int max8649_enable(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
-       return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+       return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD, 0);
 }
 
 /*
@@ -187,38 +123,40 @@ static int max8649_enable(struct regulator_dev *rdev)
 static int max8649_disable(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
-       return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+       return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD,
                                MAX8649_EN_PD);
 }
 
 static int max8649_is_enabled(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+       unsigned int val;
        int ret;
 
-       ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
-       if (ret < 0)
+       ret = regmap_read(info->regmap, MAX8649_CONTROL, &val);
+       if (ret != 0)
                return ret;
-       return !((unsigned char)ret & MAX8649_EN_PD);
+       return !((unsigned char)val & MAX8649_EN_PD);
 }
 
 static int max8649_enable_time(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
        int voltage, rate, ret;
+       unsigned int val;
 
        /* get voltage */
-       ret = max8649_reg_read(info->i2c, info->vol_reg);
-       if (ret < 0)
+       ret = regmap_read(info->regmap, info->vol_reg, &val);
+       if (ret != 0)
                return ret;
-       ret &= MAX8649_VOL_MASK;
+       val &= MAX8649_VOL_MASK;
        voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
 
        /* get rate */
-       ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
-       if (ret < 0)
+       ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
+       if (ret != 0)
                return ret;
-       ret = (ret & MAX8649_RAMP_MASK) >> 5;
+       ret = (val & MAX8649_RAMP_MASK) >> 5;
        rate = (32 * 1000) >> ret;      /* uV/uS */
 
        return DIV_ROUND_UP(voltage, rate);
@@ -230,12 +168,12 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
 
        switch (mode) {
        case REGULATOR_MODE_FAST:
-               max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
-                                MAX8649_FORCE_PWM);
+               regmap_update_bits(info->regmap, info->vol_reg, MAX8649_FORCE_PWM,
+                                  MAX8649_FORCE_PWM);
                break;
        case REGULATOR_MODE_NORMAL:
-               max8649_set_bits(info->i2c, info->vol_reg,
-                                MAX8649_FORCE_PWM, 0);
+               regmap_update_bits(info->regmap, info->vol_reg,
+                                  MAX8649_FORCE_PWM, 0);
                break;
        default:
                return -EINVAL;
@@ -246,10 +184,13 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
 static unsigned int max8649_get_mode(struct regulator_dev *rdev)
 {
        struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+       unsigned int val;
        int ret;
 
-       ret = max8649_reg_read(info->i2c, info->vol_reg);
-       if (ret & MAX8649_FORCE_PWM)
+       ret = regmap_read(info->regmap, info->vol_reg, &val);
+       if (ret != 0)
+               return ret;
+       if (val & MAX8649_FORCE_PWM)
                return REGULATOR_MODE_FAST;
        return REGULATOR_MODE_NORMAL;
 }
@@ -275,11 +216,17 @@ static struct regulator_desc dcdc_desc = {
        .owner          = THIS_MODULE,
 };
 
+static struct regmap_config max8649_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
 static int __devinit max8649_regulator_probe(struct i2c_client *client,
                                             const struct i2c_device_id *id)
 {
        struct max8649_platform_data *pdata = client->dev.platform_data;
        struct max8649_regulator_info *info = NULL;
+       unsigned int val;
        unsigned char data;
        int ret;
 
@@ -289,9 +236,14 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
                return -ENOMEM;
        }
 
-       info->i2c = client;
+       info->regmap = regmap_init_i2c(client, &max8649_regmap_config);
+       if (IS_ERR(info->regmap)) {
+               ret = PTR_ERR(info->regmap);
+               dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
+               goto fail;
+       }
+
        info->dev = &client->dev;
-       mutex_init(&info->io_lock);
        i2c_set_clientdata(client, info);
 
        info->mode = pdata->mode;
@@ -312,8 +264,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
                break;
        }
 
-       ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
-       if (ret < 0) {
+       ret = regmap_read(info->regmap, MAX8649_CHIP_ID1, &val);
+       if (ret != 0) {
                dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
                        ret);
                goto out;
@@ -321,33 +273,33 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
        dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
 
        /* enable VID0 & VID1 */
-       max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+       regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
 
        /* enable/disable external clock synchronization */
        info->extclk = pdata->extclk;
        data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
-       max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+       regmap_update_bits(info->regmap, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
        if (info->extclk) {
                /* set external clock frequency */
                info->extclk_freq = pdata->extclk_freq;
-               max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
-                                info->extclk_freq << 6);
+               regmap_update_bits(info->regmap, MAX8649_SYNC, MAX8649_EXT_MASK,
+                                  info->extclk_freq << 6);
        }
 
        if (pdata->ramp_timing) {
                info->ramp_timing = pdata->ramp_timing;
-               max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
-                                info->ramp_timing << 5);
+               regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_MASK,
+                                  info->ramp_timing << 5);
        }
 
        info->ramp_down = pdata->ramp_down;
        if (info->ramp_down) {
-               max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
-                                MAX8649_RAMP_DOWN);
+               regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+                                  MAX8649_RAMP_DOWN);
        }
 
        info->regulator = regulator_register(&dcdc_desc, &client->dev,
-                                            pdata->regulator, info);
+                                            pdata->regulator, info, NULL);
        if (IS_ERR(info->regulator)) {
                dev_err(info->dev, "failed to register regulator %s\n",
                        dcdc_desc.name);
@@ -358,6 +310,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
        dev_info(info->dev, "Max8649 regulator device is detected.\n");
        return 0;
 out:
+       regmap_exit(info->regmap);
+fail:
        kfree(info);
        return ret;
 }
@@ -369,6 +323,7 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client)
        if (info) {
                if (info->regulator)
                        regulator_unregister(info->regulator);
+               regmap_exit(info->regmap);
                kfree(info);
        }
 
index 33f5d9a492efa809b3177ce1034bef633c32c037..a838e664569f26f7fb13c7974cc8c20abee2f567 100644 (file)
@@ -449,7 +449,7 @@ static int __devinit max8660_probe(struct i2c_client *client,
 
                rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
                                             pdata->subdevs[i].platform_data,
-                                            max8660);
+                                            max8660, NULL);
                if (IS_ERR(rdev[i])) {
                        ret = PTR_ERR(rdev[i]);
                        dev_err(&client->dev, "failed to register %s\n",
index cc9ec0e0327183bb58aa81a69343905e2765db02..cc290d37c463c3059b19cd9f061b430802f8c491 100644 (file)
 #define SD1_DVM_SHIFT          5               /* SDCTL1 bit5 */
 #define SD1_DVM_EN             6               /* SDV1 bit 6 */
 
-/* bit definitions in SD & LDO control registers */
-#define OUT_ENABLE             0x1f            /* Power U/D sequence as I2C */
-#define OUT_DISABLE            0x1e            /* Power U/D sequence as I2C */
+/* bit definitions in LDO control registers */
+#define LDO_SEQ_I2C            0x7             /* Power U/D by i2c */
+#define LDO_SEQ_MASK           0x7             /* Power U/D sequence mask */
+#define LDO_SEQ_SHIFT          2               /* Power U/D sequence offset */
+#define LDO_I2C_EN             0x1             /* Enable by i2c */
+#define LDO_I2C_EN_MASK                0x1             /* Enable mask by i2c */
+#define LDO_I2C_EN_SHIFT       0               /* Enable offset by i2c */
 
 struct max8925_regulator_info {
        struct regulator_desc   desc;
@@ -40,7 +44,6 @@ struct max8925_regulator_info {
        int     vol_reg;
        int     vol_shift;
        int     vol_nbits;
-       int     enable_bit;
        int     enable_reg;
 };
 
@@ -98,8 +101,10 @@ static int max8925_enable(struct regulator_dev *rdev)
        struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 
        return max8925_set_bits(info->i2c, info->enable_reg,
-                               OUT_ENABLE << info->enable_bit,
-                               OUT_ENABLE << info->enable_bit);
+                               LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+                               LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+                               LDO_SEQ_I2C << LDO_SEQ_SHIFT |
+                               LDO_I2C_EN << LDO_I2C_EN_SHIFT);
 }
 
 static int max8925_disable(struct regulator_dev *rdev)
@@ -107,20 +112,24 @@ static int max8925_disable(struct regulator_dev *rdev)
        struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 
        return max8925_set_bits(info->i2c, info->enable_reg,
-                               OUT_ENABLE << info->enable_bit,
-                               OUT_DISABLE << info->enable_bit);
+                               LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+                               LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+                               LDO_SEQ_I2C << LDO_SEQ_SHIFT);
 }
 
 static int max8925_is_enabled(struct regulator_dev *rdev)
 {
        struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
-       int ret;
+       int ldo_seq, ret;
 
        ret = max8925_reg_read(info->i2c, info->enable_reg);
        if (ret < 0)
                return ret;
-
-       return ret & (1 << info->enable_bit);
+       ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
+       if (ldo_seq != LDO_SEQ_I2C)
+               return 1;
+       else
+               return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
 }
 
 static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
@@ -188,7 +197,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
        .vol_shift      = 0,                                    \
        .vol_nbits      = 6,                                    \
        .enable_reg     = MAX8925_SDCTL##_id,                   \
-       .enable_bit     = 0,                                    \
 }
 
 #define MAX8925_LDO(_id, min, max, step)                       \
@@ -207,7 +215,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
        .vol_shift      = 0,                                    \
        .vol_nbits      = 6,                                    \
        .enable_reg     = MAX8925_LDOCTL##_id,                  \
-       .enable_bit     = 0,                                    \
 }
 
 static struct max8925_regulator_info max8925_regulator_info[] = {
@@ -266,7 +273,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
        ri->chip = chip;
 
        rdev = regulator_register(&ri->desc, &pdev->dev,
-                                 pdata->regulator[pdev->id], ri);
+                                 pdata->regulator[pdev->id], ri, NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "failed to register regulator %s\n",
                                ri->desc.name);
index 3883d85c5b8864835c41b2b5b74b919696bd02d8..75d89400c1234db391556ec14e0ced14775580ac 100644 (file)
@@ -208,7 +208,7 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
        max8952->pdata = pdata;
 
        max8952->rdev = regulator_register(&regulator, max8952->dev,
-                       &pdata->reg_data, max8952);
+                       &pdata->reg_data, max8952, NULL);
 
        if (IS_ERR(max8952->rdev)) {
                ret = PTR_ERR(max8952->rdev);
index 6176129a27e531e20c8921537798648c723f78ec..d26e8646277b3eeb8cd7bd1a0d436a04c8d3a21c 100644 (file)
@@ -1146,7 +1146,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
                        regulators[id].n_voltages = 16;
 
                rdev[i] = regulator_register(&regulators[id], max8997->dev,
-                               pdata->regulators[i].initdata, max8997);
+                               pdata->regulators[i].initdata, max8997, NULL);
                if (IS_ERR(rdev[i])) {
                        ret = PTR_ERR(rdev[i]);
                        dev_err(max8997->dev, "regulator init failed for %d\n",
index 41a1495eec2bc28c5d58a9b68b8c2ef2f7b558a3..2d38c2493a070c4ee6b409266f51dc6704ca4993 100644 (file)
@@ -847,7 +847,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                        regulators[index].n_voltages = count;
                }
                rdev[i] = regulator_register(&regulators[index], max8998->dev,
-                               pdata->regulators[i].initdata, max8998);
+                               pdata->regulators[i].initdata, max8998, NULL);
                if (IS_ERR(rdev[i])) {
                        ret = PTR_ERR(rdev[i]);
                        dev_err(max8998->dev, "regulator init failed\n");
index 8479082e1aeaaafa1d0ca5780101855c00863df7..8e9b90ad88ae35cea58ef488c41ba1383da918e2 100644 (file)
@@ -344,7 +344,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
 
-       priv = kzalloc(sizeof(*priv) +
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
                        pdata->num_regulators * sizeof(priv->regulators[0]),
                        GFP_KERNEL);
        if (!priv)
@@ -357,7 +357,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
                init_data = &pdata->regulators[i];
                priv->regulators[i] = regulator_register(
                                &mc13783_regulators[init_data->id].desc,
-                               &pdev->dev, init_data->init_data, priv);
+                               &pdev->dev, init_data->init_data, priv, NULL);
 
                if (IS_ERR(priv->regulators[i])) {
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -374,8 +374,6 @@ err:
        while (--i >= 0)
                regulator_unregister(priv->regulators[i]);
 
-       kfree(priv);
-
        return ret;
 }
 
@@ -391,7 +389,6 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
        for (i = 0; i < pdata->num_regulators; i++)
                regulator_unregister(priv->regulators[i]);
 
-       kfree(priv);
        return 0;
 }
 
index 023d17d022cf7ba329068b61a7d3b69695c44a82..e8cfc99dd8f066eb4a4ab3bc99007b70cf358bbc 100644 (file)
@@ -527,18 +527,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
        struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
        struct mc13xxx_regulator_platform_data *pdata =
                dev_get_platdata(&pdev->dev);
-       struct mc13xxx_regulator_init_data *init_data;
+       struct mc13xxx_regulator_init_data *mc13xxx_data;
        int i, ret;
+       int num_regulators = 0;
        u32 val;
 
-       priv = kzalloc(sizeof(*priv) +
-               pdata->num_regulators * sizeof(priv->regulators[0]),
+       num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+       if (num_regulators <= 0 && pdata)
+               num_regulators = pdata->num_regulators;
+       if (num_regulators <= 0)
+               return -EINVAL;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
+               num_regulators * sizeof(priv->regulators[0]),
                GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
+       priv->num_regulators = num_regulators;
        priv->mc13xxx_regulators = mc13892_regulators;
        priv->mc13xxx = mc13892;
+       platform_set_drvdata(pdev, priv);
 
        mc13xxx_lock(mc13892);
        ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
@@ -569,11 +578,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
                = mc13892_vcam_set_mode;
        mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
                = mc13892_vcam_get_mode;
-       for (i = 0; i < pdata->num_regulators; i++) {
-               init_data = &pdata->regulators[i];
+
+       mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+                                       ARRAY_SIZE(mc13892_regulators));
+       for (i = 0; i < num_regulators; i++) {
+               struct regulator_init_data *init_data;
+               struct regulator_desc *desc;
+               struct device_node *node = NULL;
+               int id;
+
+               if (mc13xxx_data) {
+                       id = mc13xxx_data[i].id;
+                       init_data = mc13xxx_data[i].init_data;
+                       node = mc13xxx_data[i].node;
+               } else {
+                       id = pdata->regulators[i].id;
+                       init_data = pdata->regulators[i].init_data;
+               }
+               desc = &mc13892_regulators[id].desc;
+
                priv->regulators[i] = regulator_register(
-                       &mc13892_regulators[init_data->id].desc,
-                       &pdev->dev, init_data->init_data, priv);
+                       desc, &pdev->dev, init_data, priv, node);
 
                if (IS_ERR(priv->regulators[i])) {
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -583,8 +608,6 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
                }
        }
 
-       platform_set_drvdata(pdev, priv);
-
        return 0;
 err:
        while (--i >= 0)
@@ -592,7 +615,6 @@ err:
 
 err_free:
        mc13xxx_unlock(mc13892);
-       kfree(priv);
 
        return ret;
 }
@@ -600,16 +622,13 @@ err_free:
 static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
 {
        struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
-       struct mc13xxx_regulator_platform_data *pdata =
-               dev_get_platdata(&pdev->dev);
        int i;
 
        platform_set_drvdata(pdev, NULL);
 
-       for (i = 0; i < pdata->num_regulators; i++)
+       for (i = 0; i < priv->num_regulators; i++)
                regulator_unregister(priv->regulators[i]);
 
-       kfree(priv);
        return 0;
 }
 
index 6532853a6ef5e2e92beb3174c13f1b430e071dc5..80ecafef1bc38b217abe30f1aa0537ca64d9b3d4 100644 (file)
 #include <linux/mfd/mc13xxx.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include "mc13xxx.h"
 
 static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
@@ -236,6 +238,61 @@ int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
 }
 EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
 
+#ifdef CONFIG_OF
+int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+       struct device_node *parent, *child;
+       int num = 0;
+
+       of_node_get(pdev->dev.parent->of_node);
+       parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+       if (!parent)
+               return -ENODEV;
+
+       for_each_child_of_node(parent, child)
+               num++;
+
+       return num;
+}
+
+struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
+       struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+       int num_regulators)
+{
+       struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+       struct mc13xxx_regulator_init_data *data, *p;
+       struct device_node *parent, *child;
+       int i;
+
+       of_node_get(pdev->dev.parent->of_node);
+       parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+       if (!parent)
+               return NULL;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
+                           GFP_KERNEL);
+       if (!data)
+               return NULL;
+       p = data;
+
+       for_each_child_of_node(parent, child) {
+               for (i = 0; i < num_regulators; i++) {
+                       if (!of_node_cmp(child->name,
+                                        regulators[i].desc.name)) {
+                               p->id = i;
+                               p->init_data = of_get_regulator_init_data(
+                                                       &pdev->dev, child);
+                               p->node = child;
+                               p++;
+                               break;
+                       }
+               }
+       }
+
+       return data;
+}
+#endif
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
 MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
index 27758267e12298858c6af4a4d66bb0a76be9df33..b3961c658b0594a8c728f780f1c49fd7830e4959 100644 (file)
@@ -29,6 +29,7 @@ struct mc13xxx_regulator_priv {
        struct mc13xxx *mc13xxx;
        u32 powermisc_pwgt_state;
        struct mc13xxx_regulator *mc13xxx_regulators;
+       int num_regulators;
        struct regulator_dev *regulators[];
 };
 
@@ -42,13 +43,32 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
                int min_uV, int max_uV, unsigned *selector);
 extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
 
+#ifdef CONFIG_OF
+extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
+extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+       struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+       int num_regulators);
+#else
+static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+       return -ENODEV;
+}
+
+static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+       struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+       int num_regulators)
+{
+       return NULL;
+}
+#endif
+
 extern struct regulator_ops mc13xxx_regulator_ops;
 extern struct regulator_ops mc13xxx_fixed_regulator_ops;
 
 #define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops)        \
        [prefix ## _name] = {                           \
                .desc = {                                               \
-                       .name = #prefix "_" #_name,                     \
+                       .name = #_name,                                 \
                        .n_voltages = ARRAY_SIZE(_voltages),            \
                        .ops = &_ops,                   \
                        .type = REGULATOR_VOLTAGE,                      \
@@ -66,7 +86,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
 #define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops)     \
        [prefix ## _name] = {                           \
                .desc = {                                               \
-                       .name = #prefix "_" #_name,                     \
+                       .name = #_name,                                 \
                        .n_voltages = ARRAY_SIZE(_voltages),            \
                        .ops = &_ops,           \
                        .type = REGULATOR_VOLTAGE,                      \
@@ -81,7 +101,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
 #define MC13xxx_GPO_DEFINE(prefix, _name, _reg,  _voltages, _ops)      \
        [prefix ## _name] = {                           \
                .desc = {                                               \
-                       .name = #prefix "_" #_name,                     \
+                       .name = #_name,                                 \
                        .n_voltages = ARRAY_SIZE(_voltages),            \
                        .ops = &_ops,           \
                        .type = REGULATOR_VOLTAGE,                      \
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
new file mode 100644 (file)
index 0000000..f1651eb
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * OF helpers for regulator framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regulator/machine.h>
+
+static void of_get_regulation_constraints(struct device_node *np,
+                                       struct regulator_init_data **init_data)
+{
+       const __be32 *min_uV, *max_uV, *uV_offset;
+       const __be32 *min_uA, *max_uA;
+       struct regulation_constraints *constraints = &(*init_data)->constraints;
+
+       constraints->name = of_get_property(np, "regulator-name", NULL);
+
+       min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
+       if (min_uV)
+               constraints->min_uV = be32_to_cpu(*min_uV);
+       max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
+       if (max_uV)
+               constraints->max_uV = be32_to_cpu(*max_uV);
+
+       /* Voltage change possible? */
+       if (constraints->min_uV != constraints->max_uV)
+               constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+       /* Only one voltage?  Then make sure it's set. */
+       if (constraints->min_uV == constraints->max_uV)
+               constraints->apply_uV = true;
+
+       uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL);
+       if (uV_offset)
+               constraints->uV_offset = be32_to_cpu(*uV_offset);
+       min_uA = of_get_property(np, "regulator-min-microamp", NULL);
+       if (min_uA)
+               constraints->min_uA = be32_to_cpu(*min_uA);
+       max_uA = of_get_property(np, "regulator-max-microamp", NULL);
+       if (max_uA)
+               constraints->max_uA = be32_to_cpu(*max_uA);
+
+       /* Current change possible? */
+       if (constraints->min_uA != constraints->max_uA)
+               constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+
+       if (of_find_property(np, "regulator-boot-on", NULL))
+               constraints->boot_on = true;
+
+       if (of_find_property(np, "regulator-always-on", NULL))
+               constraints->always_on = true;
+       else /* status change should be possible if not always on. */
+               constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+}
+
+/**
+ * of_get_regulator_init_data - extract regulator_init_data structure info
+ * @dev: device requesting for regulator_init_data
+ *
+ * Populates regulator_init_data structure by extracting data from device
+ * tree node, returns a pointer to the populated struture or NULL if memory
+ * alloc fails.
+ */
+struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
+                                               struct device_node *node)
+{
+       struct regulator_init_data *init_data;
+
+       if (!node)
+               return NULL;
+
+       init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+       if (!init_data)
+               return NULL; /* Out of memory? */
+
+       of_get_regulation_constraints(node, &init_data);
+       return init_data;
+}
+EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
index 31f6e11a7f16f6c1621e99bcc2b3b8d4704cd224..a5aab1b08bcf6f246255b42600a3a355fc7de5a5 100644 (file)
@@ -277,7 +277,7 @@ static int __devinit pcap_regulator_probe(struct platform_device *pdev)
        void *pcap = dev_get_drvdata(pdev->dev.parent);
 
        rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
-                               pdev->dev.platform_data, pcap);
+                               pdev->dev.platform_data, pcap, NULL);
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
index 69a11d9dd87f8388ccb88e4787cefe518888b78e..1d1c310562975a7204db2c30d7de230c6bec1514 100644 (file)
@@ -320,7 +320,7 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
        pcf = dev_to_pcf50633(pdev->dev.parent);
 
        rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
-                                 pdev->dev.platform_data, pcf);
+                                 pdev->dev.platform_data, pcf, NULL);
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
index 1011873896dc9205b4844aed9531de8cd9051531..d9278da18a9e9cede299722093977e2c572ccd3d 100644 (file)
@@ -151,7 +151,8 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
        /* Register regulator with framework */
        tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
                                             &tps6105x->client->dev,
-                                            pdata->regulator_data, tps6105x);
+                                            pdata->regulator_data, tps6105x,
+                                            NULL);
        if (IS_ERR(tps6105x->regulator)) {
                ret = PTR_ERR(tps6105x->regulator);
                dev_err(&tps6105x->client->dev,
index 9fb4c7b81753d99ff8effaa781f2d8b5930261d8..18d61a0529a9476d6aa9a2368f7b6df53b258387 100644 (file)
@@ -152,48 +152,21 @@ struct tps_driver_data {
        u8 core_regulator;
 };
 
-static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
-       return regmap_update_bits(tps->regmap, reg, mask, mask);
-}
-
-static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
-       return regmap_update_bits(tps->regmap, reg, mask, 0);
-}
-
-static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
-{
-       unsigned int val;
-       int ret;
-
-       ret = regmap_read(tps->regmap, reg, &val);
-
-       if (ret != 0)
-               return ret;
-       else
-               return val;
-}
-
-static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
-{
-       return regmap_write(tps->regmap, reg, val);
-}
-
 static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
 {
        struct tps_pmic *tps = rdev_get_drvdata(dev);
        int data, dcdc = rdev_get_id(dev);
+       int ret;
        u8 shift;
 
        if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
                return -EINVAL;
 
        shift = TPS65023_NUM_REGULATOR - dcdc;
-       data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+       ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
 
-       if (data < 0)
-               return data;
+       if (ret != 0)
+               return ret;
        else
                return (data & 1<<shift) ? 1 : 0;
 }
@@ -202,16 +175,17 @@ static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
 {
        struct tps_pmic *tps = rdev_get_drvdata(dev);
        int data, ldo = rdev_get_id(dev);
+       int ret;
        u8 shift;
 
        if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
                return -EINVAL;
 
        shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-       data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+       ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
 
-       if (data < 0)
-               return data;
+       if (ret != 0)
+               return ret;
        else
                return (data & 1<<shift) ? 1 : 0;
 }
@@ -226,7 +200,7 @@ static int tps65023_dcdc_enable(struct regulator_dev *dev)
                return -EINVAL;
 
        shift = TPS65023_NUM_REGULATOR - dcdc;
-       return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+       return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
 }
 
 static int tps65023_dcdc_disable(struct regulator_dev *dev)
@@ -239,7 +213,7 @@ static int tps65023_dcdc_disable(struct regulator_dev *dev)
                return -EINVAL;
 
        shift = TPS65023_NUM_REGULATOR - dcdc;
-       return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+       return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
 }
 
 static int tps65023_ldo_enable(struct regulator_dev *dev)
@@ -252,7 +226,7 @@ static int tps65023_ldo_enable(struct regulator_dev *dev)
                return -EINVAL;
 
        shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-       return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+       return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
 }
 
 static int tps65023_ldo_disable(struct regulator_dev *dev)
@@ -265,21 +239,22 @@ static int tps65023_ldo_disable(struct regulator_dev *dev)
                return -EINVAL;
 
        shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-       return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+       return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
 }
 
 static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
 {
        struct tps_pmic *tps = rdev_get_drvdata(dev);
+       int ret;
        int data, dcdc = rdev_get_id(dev);
 
        if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
                return -EINVAL;
 
        if (dcdc == tps->core_regulator) {
-               data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
-               if (data < 0)
-                       return data;
+               ret = regmap_read(tps->regmap, TPS65023_REG_DEF_CORE, &data);
+               if (ret != 0)
+                       return ret;
                data &= (tps->info[dcdc]->table_len - 1);
                return tps->info[dcdc]->table[data] * 1000;
        } else
@@ -318,13 +293,13 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
        if (vsel == tps->info[dcdc]->table_len)
                goto failed;
 
-       ret = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+       ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, vsel);
 
        /* Tell the chip that we have changed the value in DEFCORE
         * and its time to update the core voltage
         */
-       tps_65023_set_bits(tps, TPS65023_REG_CON_CTRL2,
-                                               TPS65023_REG_CTRL2_GO);
+       regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+                       TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
 
        return ret;
 
@@ -336,13 +311,14 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
 {
        struct tps_pmic *tps = rdev_get_drvdata(dev);
        int data, ldo = rdev_get_id(dev);
+       int ret;
 
        if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
                return -EINVAL;
 
-       data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
-       if (data < 0)
-               return data;
+       ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+       if (ret != 0)
+               return ret;
 
        data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
        data &= (tps->info[ldo]->table_len - 1);
@@ -354,6 +330,7 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
 {
        struct tps_pmic *tps = rdev_get_drvdata(dev);
        int data, vsel, ldo = rdev_get_id(dev);
+       int ret;
 
        if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
                return -EINVAL;
@@ -377,13 +354,13 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
 
        *selector = vsel;
 
-       data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
-       if (data < 0)
-               return data;
+       ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+       if (ret != 0)
+               return ret;
 
        data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
        data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
-       return tps_65023_reg_write(tps, TPS65023_REG_LDO_CTRL, data);
+       return regmap_write(tps->regmap, TPS65023_REG_LDO_CTRL, data);
 }
 
 static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
@@ -496,7 +473,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
 
                /* Register the regulators */
                rdev = regulator_register(&tps->desc[i], &client->dev,
-                                         init_data, tps);
+                                         init_data, tps, NULL);
                if (IS_ERR(rdev)) {
                        dev_err(&client->dev, "failed to register %s\n",
                                id->name);
@@ -511,12 +488,12 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
        i2c_set_clientdata(client, tps);
 
        /* Enable setting output voltage by I2C */
-       tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
-                                               TPS65023_REG_CTRL2_CORE_ADJ);
+       regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+                       TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
 
        /* Enable setting output voltage by I2C */
-       tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
-                                               TPS65023_REG_CTRL2_CORE_ADJ);
+       regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+                       TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
 
        return 0;
 
index bdef70365f52b5c1c01ae30cd66d194d31c820ac..0b63ef71a5fe9810a7b2c3aec40909cd8785035d 100644 (file)
@@ -599,7 +599,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
                tps->desc[i].owner = THIS_MODULE;
 
                rdev = regulator_register(&tps->desc[i],
-                                       tps6507x_dev->dev, init_data, tps);
+                                       tps6507x_dev->dev, init_data, tps, NULL);
                if (IS_ERR(rdev)) {
                        dev_err(tps6507x_dev->dev,
                                "failed to register %s regulator\n",
index 9166aa0a9df71c582425d89a7d6517bd22f01072..70b7b1f4f000e1f779ecdb2c2dbfaf3de4b11274 100644 (file)
@@ -651,7 +651,7 @@ static int __devinit pmic_probe(struct spi_device *spi)
                        hw->desc[i].n_voltages = 1;
 
                hw->rdev[i] = regulator_register(&hw->desc[i], dev,
-                                                init_data, hw);
+                                                init_data, hw, NULL);
                if (IS_ERR(hw->rdev[i])) {
                        ret = PTR_ERR(hw->rdev[i]);
                        hw->rdev[i] = NULL;
index 14b9389dd52abf719552d7e084b5776d791d45c6..c75fb20faa579486b4cfe31cbfd97b2ea43b9320 100644 (file)
@@ -396,7 +396,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
                return err;
 
        rdev = regulator_register(&ri->desc, &pdev->dev,
-                                 pdev->dev.platform_data, ri);
+                                 pdev->dev.platform_data, ri, NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "failed to register regulator %s\n",
                                ri->desc.name);
index b552aae55b417c7cd9aa97fba33bd61ad0b7e787..5c15ba01e9c729bb6139b9d14fb52cb38f5337e8 100644 (file)
 #include <linux/gpio.h>
 #include <linux/mfd/tps65910.h>
 
-#define TPS65910_REG_VRTC              0
-#define TPS65910_REG_VIO               1
-#define TPS65910_REG_VDD1              2
-#define TPS65910_REG_VDD2              3
-#define TPS65910_REG_VDD3              4
-#define TPS65910_REG_VDIG1             5
-#define TPS65910_REG_VDIG2             6
-#define TPS65910_REG_VPLL              7
-#define TPS65910_REG_VDAC              8
-#define TPS65910_REG_VAUX1             9
-#define TPS65910_REG_VAUX2             10
-#define TPS65910_REG_VAUX33            11
-#define TPS65910_REG_VMMC              12
-
-#define TPS65911_REG_VDDCTRL           4
-#define TPS65911_REG_LDO1              5
-#define TPS65911_REG_LDO2              6
-#define TPS65911_REG_LDO3              7
-#define TPS65911_REG_LDO4              8
-#define TPS65911_REG_LDO5              9
-#define TPS65911_REG_LDO6              10
-#define TPS65911_REG_LDO7              11
-#define TPS65911_REG_LDO8              12
-
 #define TPS65910_SUPPLY_STATE_ENABLED  0x1
 
 /* supported VIO voltages in milivolts */
@@ -885,8 +861,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
        if (!pmic_plat_data)
                return -EINVAL;
 
-       reg_data = pmic_plat_data->tps65910_pmic_init_data;
-
        pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
        if (!pmic)
                return -ENOMEM;
@@ -937,7 +911,16 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
                goto err_free_info;
        }
 
-       for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
+       for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
+                       i++, info++) {
+
+               reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
+
+               /* Regulator API handles empty constraints but not NULL
+                * constraints */
+               if (!reg_data)
+                       continue;
+
                /* Register the regulators */
                pmic->info[i] = info;
 
@@ -965,7 +948,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
                pmic->desc[i].owner = THIS_MODULE;
 
                rdev = regulator_register(&pmic->desc[i],
-                               tps65910->dev, reg_data, pmic);
+                               tps65910->dev, reg_data, pmic, NULL);
                if (IS_ERR(rdev)) {
                        dev_err(tps65910->dev,
                                "failed to register %s regulator\n",
index 39d4a1749e71a78b3ad73907e407bb8d02435df3..da00d88f94b7921b89384134d96a0d176cbfe999 100644 (file)
@@ -727,7 +727,7 @@ static __devinit int tps65912_probe(struct platform_device *pdev)
                pmic->desc[i].owner = THIS_MODULE;
                range = tps65912_get_range(pmic, i);
                rdev = regulator_register(&pmic->desc[i],
-                                       tps65912->dev, reg_data, pmic);
+                                       tps65912->dev, reg_data, pmic, NULL);
                if (IS_ERR(rdev)) {
                        dev_err(tps65912->dev,
                                "failed to register %s regulator\n",
index 11cc308d66e925db83fa8c50697e734608aac1b8..181a2cfe180cf42a4364a12ba194e099cd00dc9e 100644 (file)
@@ -1112,7 +1112,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
                break;
        }
 
-       rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
+       rdev = regulator_register(&info->desc, &pdev->dev, initdata, info, NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "can't register %s, %ld\n",
                                info->desc.name, PTR_ERR(rdev));
index fc66551469996a6fa05775fa1d1d1decbae38f8a..518667ef9a0d7e132b57fa92cb37397b6c799d9c 100644 (file)
@@ -185,18 +185,7 @@ static struct platform_driver regulator_userspace_consumer_driver = {
        },
 };
 
-
-static int __init regulator_userspace_consumer_init(void)
-{
-       return platform_driver_register(&regulator_userspace_consumer_driver);
-}
-module_init(regulator_userspace_consumer_init);
-
-static void __exit regulator_userspace_consumer_exit(void)
-{
-       platform_driver_unregister(&regulator_userspace_consumer_driver);
-}
-module_exit(regulator_userspace_consumer_exit);
+module_platform_driver(regulator_userspace_consumer_driver);
 
 MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
 MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
index 858c1f861ba596d134c253d74ba7d06d330ea0ff..ee0b161c998fd64ad276e228615ab47cefb178e3 100644 (file)
@@ -352,17 +352,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
        },
 };
 
-static int __init regulator_virtual_consumer_init(void)
-{
-       return platform_driver_register(&regulator_virtual_consumer_driver);
-}
-module_init(regulator_virtual_consumer_init);
-
-static void __exit regulator_virtual_consumer_exit(void)
-{
-       platform_driver_unregister(&regulator_virtual_consumer_driver);
-}
-module_exit(regulator_virtual_consumer_exit);
+module_platform_driver(regulator_virtual_consumer_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("Virtual regulator consumer");
index bd3531d8b2ac9364fb48c182c4947a00696d7c34..4904a40b0d46f6b3b870155a535312623e72a831 100644 (file)
@@ -511,7 +511,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->dcdc[id] == NULL)
                return -ENODEV;
 
-       dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+       dcdc = devm_kzalloc(&pdev->dev,  sizeof(struct wm831x_dcdc),
+                           GFP_KERNEL);
        if (dcdc == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -553,7 +554,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
                wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
 
        dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-                                            pdata->dcdc[id], dcdc);
+                                            pdata->dcdc[id], dcdc, NULL);
        if (IS_ERR(dcdc->regulator)) {
                ret = PTR_ERR(dcdc->regulator);
                dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -590,7 +591,6 @@ err_regulator:
 err:
        if (dcdc->dvs_gpio)
                gpio_free(dcdc->dvs_gpio);
-       kfree(dcdc);
        return ret;
 }
 
@@ -605,7 +605,6 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
        regulator_unregister(dcdc->regulator);
        if (dcdc->dvs_gpio)
                gpio_free(dcdc->dvs_gpio);
-       kfree(dcdc);
 
        return 0;
 }
@@ -722,7 +721,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->dcdc[id] == NULL)
                return -ENODEV;
 
-       dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+       dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+                           GFP_KERNEL);
        if (dcdc == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -747,7 +747,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
        dcdc->desc.owner = THIS_MODULE;
 
        dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-                                            pdata->dcdc[id], dcdc);
+                                            pdata->dcdc[id], dcdc, NULL);
        if (IS_ERR(dcdc->regulator)) {
                ret = PTR_ERR(dcdc->regulator);
                dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -771,7 +771,6 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
 err_regulator:
        regulator_unregister(dcdc->regulator);
 err:
-       kfree(dcdc);
        return ret;
 }
 
@@ -783,7 +782,6 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
 
        free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
        regulator_unregister(dcdc->regulator);
-       kfree(dcdc);
 
        return 0;
 }
@@ -874,7 +872,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
        dcdc->desc.owner = THIS_MODULE;
 
        dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-                                            pdata->dcdc[id], dcdc);
+                                            pdata->dcdc[id], dcdc, NULL);
        if (IS_ERR(dcdc->regulator)) {
                ret = PTR_ERR(dcdc->regulator);
                dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -973,7 +971,7 @@ static __devinit int wm831x_epe_probe(struct platform_device *pdev)
        dcdc->desc.owner = THIS_MODULE;
 
        dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-                                            pdata->epe[id], dcdc);
+                                            pdata->epe[id], dcdc, NULL);
        if (IS_ERR(dcdc->regulator)) {
                ret = PTR_ERR(dcdc->regulator);
                dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
index 01f27c7f4236bca00615e2f4521126f3ce87b702..634aac3f2d5f86c511bc4390777437997b369f87 100644 (file)
@@ -162,7 +162,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->isink[id] == NULL)
                return -ENODEV;
 
-       isink = kzalloc(sizeof(struct wm831x_isink), GFP_KERNEL);
+       isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
+                            GFP_KERNEL);
        if (isink == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -189,7 +190,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
        isink->desc.owner = THIS_MODULE;
 
        isink->regulator = regulator_register(&isink->desc, &pdev->dev,
-                                            pdata->isink[id], isink);
+                                            pdata->isink[id], isink, NULL);
        if (IS_ERR(isink->regulator)) {
                ret = PTR_ERR(isink->regulator);
                dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -213,7 +214,6 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
 err_regulator:
        regulator_unregister(isink->regulator);
 err:
-       kfree(isink);
        return ret;
 }
 
@@ -226,7 +226,6 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
        free_irq(platform_get_irq(pdev, 0), isink);
 
        regulator_unregister(isink->regulator);
-       kfree(isink);
 
        return 0;
 }
index 6709710a059e6e78c2267c376903b04ea26820fa..f1e4ab0f9fda8e02d6857c705f8fc1032fc7f23b 100644 (file)
@@ -326,7 +326,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->ldo[id] == NULL)
                return -ENODEV;
 
-       ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+       ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
        if (ldo == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -351,7 +351,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
        ldo->desc.owner = THIS_MODULE;
 
        ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-                                            pdata->ldo[id], ldo);
+                                            pdata->ldo[id], ldo, NULL);
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -376,7 +376,6 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
 err_regulator:
        regulator_unregister(ldo->regulator);
 err:
-       kfree(ldo);
        return ret;
 }
 
@@ -388,7 +387,6 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
 
        free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
        regulator_unregister(ldo->regulator);
-       kfree(ldo);
 
        return 0;
 }
@@ -596,7 +594,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->ldo[id] == NULL)
                return -ENODEV;
 
-       ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+       ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
        if (ldo == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -621,7 +619,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
        ldo->desc.owner = THIS_MODULE;
 
        ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-                                            pdata->ldo[id], ldo);
+                                            pdata->ldo[id], ldo, NULL);
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -645,7 +643,6 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
 err_regulator:
        regulator_unregister(ldo->regulator);
 err:
-       kfree(ldo);
        return ret;
 }
 
@@ -655,7 +652,6 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
 
        free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
        regulator_unregister(ldo->regulator);
-       kfree(ldo);
 
        return 0;
 }
@@ -793,7 +789,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
        if (pdata == NULL || pdata->ldo[id] == NULL)
                return -ENODEV;
 
-       ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+       ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
        if (ldo == NULL) {
                dev_err(&pdev->dev, "Unable to allocate private data\n");
                return -ENOMEM;
@@ -818,7 +814,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
        ldo->desc.owner = THIS_MODULE;
 
        ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-                                            pdata->ldo[id], ldo);
+                                            pdata->ldo[id], ldo, NULL);
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -831,7 +827,6 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
        return 0;
 
 err:
-       kfree(ldo);
        return ret;
 }
 
@@ -840,7 +835,6 @@ static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
        struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
 
        regulator_unregister(ldo->regulator);
-       kfree(ldo);
 
        return 0;
 }
index 1bcb22c440953b6dcab42609361c150e0857fb23..6894009d815aa8fea3e4b9b220d4e48a9fb74ca0 100644 (file)
@@ -1428,7 +1428,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
        /* register regulator */
        rdev = regulator_register(&wm8350_reg[pdev->id], &pdev->dev,
                                  pdev->dev.platform_data,
-                                 dev_get_drvdata(&pdev->dev));
+                                 dev_get_drvdata(&pdev->dev), NULL);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "failed to register %s\n",
                        wm8350_reg[pdev->id].name);
index 71632ddc37813a2cd3ba4b9a107781456335ae81..706f39563a7b76f149173503e17baae77905d198 100644 (file)
@@ -326,7 +326,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
 
        rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
-                                 pdev->dev.platform_data, wm8400);
+                                 pdev->dev.platform_data, wm8400, NULL);
 
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
index b87bf5c841f8216a8867db35b23bab5acd0acce2..435e335d6e67ae372054bcb697e773684ab40d3d 100644 (file)
@@ -269,7 +269,7 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
                ldo->is_enabled = true;
 
        ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
-                                            pdata->ldo[id].init_data, ldo);
+                                            pdata->ldo[id].init_data, ldo, NULL);
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
index 8e286259a007fbc5921b4c569cdb959f62762f7a..8a1c031391d66f00a2c276325841bea20546e3c4 100644 (file)
@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                alarm->time.tm_hour = now.tm_hour;
 
        /* For simplicity, only support date rollover for now */
-       if (alarm->time.tm_mday == -1) {
+       if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
                alarm->time.tm_mday = now.tm_mday;
                missing = day;
        }
-       if (alarm->time.tm_mon == -1) {
+       if ((unsigned)alarm->time.tm_mon >= 12) {
                alarm->time.tm_mon = now.tm_mon;
                if (missing == none)
                        missing = month;
index 64b847b7f9705e55e7471ce301bff5a4a3a7bd62..f04761e6622dddcc34a3e860b7559ea410a9d113 100644 (file)
@@ -410,17 +410,7 @@ static struct platform_driver pm860x_rtc_driver = {
        .remove         = __devexit_p(pm860x_rtc_remove),
 };
 
-static int __init pm860x_rtc_init(void)
-{
-       return platform_driver_register(&pm860x_rtc_driver);
-}
-module_init(pm860x_rtc_init);
-
-static void __exit pm860x_rtc_exit(void)
-{
-       platform_driver_unregister(&pm860x_rtc_driver);
-}
-module_exit(pm860x_rtc_exit);
+module_platform_driver(pm860x_rtc_driver);
 
 MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index e346705aae92f1ebae4570f7f13adc77adc89fef..a0a9810adf0b91646aeed789020575c4d00f214e 100644 (file)
@@ -90,7 +90,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
 
        /* Early AB8500 chips will not clear the rtc read request bit */
        if (abx500_get_chip_id(dev) == 0) {
-               msleep(1);
+               usleep_range(1000, 1000);
        } else {
                /* Wait for some cycles after enabling the rtc read in ab8500 */
                while (time_before(jiffies, timeout)) {
@@ -102,7 +102,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
                        if (!(value & RTC_READ_REQUEST))
                                break;
 
-                       msleep(1);
+                       usleep_range(1000, 5000);
                }
        }
 
@@ -258,6 +258,109 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        return ab8500_rtc_irq_enable(dev, alarm->enabled);
 }
 
+
+static int ab8500_rtc_set_calibration(struct device *dev, int calibration)
+{
+       int retval;
+       u8  rtccal = 0;
+
+       /*
+        * Check that the calibration value (which is in units of 0.5
+        * parts-per-million) is in the AB8500's range for RtcCalibration
+        * register. -128 (0x80) is not permitted because the AB8500 uses
+        * a sign-bit rather than two's complement, so 0x80 is just another
+        * representation of zero.
+        */
+       if ((calibration < -127) || (calibration > 127)) {
+               dev_err(dev, "RtcCalibration value outside permitted range\n");
+               return -EINVAL;
+       }
+
+       /*
+        * The AB8500 uses sign (in bit7) and magnitude (in bits0-7)
+        * so need to convert to this sort of representation before writing
+        * into RtcCalibration register...
+        */
+       if (calibration >= 0)
+               rtccal = 0x7F & calibration;
+       else
+               rtccal = ~(calibration - 1) | 0x80;
+
+       retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+                       AB8500_RTC_CALIB_REG, rtccal);
+
+       return retval;
+}
+
+static int ab8500_rtc_get_calibration(struct device *dev, int *calibration)
+{
+       int retval;
+       u8  rtccal = 0;
+
+       retval =  abx500_get_register_interruptible(dev, AB8500_RTC,
+                       AB8500_RTC_CALIB_REG, &rtccal);
+       if (retval >= 0) {
+               /*
+                * The AB8500 uses sign (in bit7) and magnitude (in bits0-7)
+                * so need to convert value from RtcCalibration register into
+                * a two's complement signed value...
+                */
+               if (rtccal & 0x80)
+                       *calibration = 0 - (rtccal & 0x7F);
+               else
+                       *calibration = 0x7F & rtccal;
+       }
+
+       return retval;
+}
+
+static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       int retval;
+       int calibration = 0;
+
+       if (sscanf(buf, " %i ", &calibration) != 1) {
+               dev_err(dev, "Failed to store RTC calibration attribute\n");
+               return -EINVAL;
+       }
+
+       retval = ab8500_rtc_set_calibration(dev, calibration);
+
+       return retval ? retval : count;
+}
+
+static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       int  retval = 0;
+       int  calibration = 0;
+
+       retval = ab8500_rtc_get_calibration(dev, &calibration);
+       if (retval < 0) {
+               dev_err(dev, "Failed to read RTC calibration attribute\n");
+               sprintf(buf, "0\n");
+               return retval;
+       }
+
+       return sprintf(buf, "%d\n", calibration);
+}
+
+static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR,
+                  ab8500_sysfs_show_rtc_calibration,
+                  ab8500_sysfs_store_rtc_calibration);
+
+static int ab8500_sysfs_rtc_register(struct device *dev)
+{
+       return device_create_file(dev, &dev_attr_rtc_calibration);
+}
+
+static void ab8500_sysfs_rtc_unregister(struct device *dev)
+{
+       device_remove_file(dev, &dev_attr_rtc_calibration);
+}
+
 static irqreturn_t rtc_alarm_handler(int irq, void *data)
 {
        struct rtc_device *rtc = data;
@@ -295,7 +398,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
                return err;
 
        /* Wait for reset by the PorRtc */
-       msleep(1);
+       usleep_range(1000, 5000);
 
        err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
                AB8500_RTC_STAT_REG, &rtc_ctrl);
@@ -308,6 +411,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       device_init_wakeup(&pdev->dev, true);
+
        rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
                        THIS_MODULE);
        if (IS_ERR(rtc)) {
@@ -316,8 +421,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
-                                  "ab8500-rtc", rtc);
+       err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
+               IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
        if (err < 0) {
                rtc_device_unregister(rtc);
                return err;
@@ -325,6 +430,13 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
+
+       err = ab8500_sysfs_rtc_register(&pdev->dev);
+       if (err) {
+               dev_err(&pdev->dev, "sysfs RTC failed to register\n");
+               return err;
+       }
+
        return 0;
 }
 
@@ -333,6 +445,8 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
        int irq = platform_get_irq_byname(pdev, "ALARM");
 
+       ab8500_sysfs_rtc_unregister(&pdev->dev);
+
        free_irq(irq, rtc);
        rtc_device_unregister(rtc);
        platform_set_drvdata(pdev, NULL);
@@ -349,18 +463,8 @@ static struct platform_driver ab8500_rtc_driver = {
        .remove = __devexit_p(ab8500_rtc_remove),
 };
 
-static int __init ab8500_rtc_init(void)
-{
-       return platform_driver_register(&ab8500_rtc_driver);
-}
-
-static void __exit ab8500_rtc_exit(void)
-{
-       platform_driver_unregister(&ab8500_rtc_driver);
-}
+module_platform_driver(ab8500_rtc_driver);
 
-module_init(ab8500_rtc_init);
-module_exit(ab8500_rtc_exit);
 MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
 MODULE_DESCRIPTION("AB8500 RTC Driver");
 MODULE_LICENSE("GPL v2");
index 90d866272c8ea5095981afaa62396f03d92343b6..abfc1a0c07d9cfb0c6e6f7c46249c1283d10afca 100644 (file)
@@ -456,18 +456,7 @@ static struct platform_driver bfin_rtc_driver = {
        .resume         = bfin_rtc_resume,
 };
 
-static int __init bfin_rtc_init(void)
-{
-       return platform_driver_register(&bfin_rtc_driver);
-}
-
-static void __exit bfin_rtc_exit(void)
-{
-       platform_driver_unregister(&bfin_rtc_driver);
-}
-
-module_init(bfin_rtc_init);
-module_exit(bfin_rtc_exit);
+module_platform_driver(bfin_rtc_driver);
 
 MODULE_DESCRIPTION("Blackfin On-Chip Real Time Clock Driver");
 MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
index 128270ce355d08f0689b8e0961d39bb565923bf5..bf612ef229417730a8146f62d3a9270daba67d28 100644 (file)
@@ -218,15 +218,4 @@ static struct platform_driver bq4802_driver = {
        .remove         = __devexit_p(bq4802_remove),
 };
 
-static int __init bq4802_init(void)
-{
-       return platform_driver_register(&bq4802_driver);
-}
-
-static void __exit bq4802_exit(void)
-{
-       platform_driver_unregister(&bq4802_driver);
-}
-
-module_init(bq4802_init);
-module_exit(bq4802_exit);
+module_platform_driver(bq4802_driver);
index 05beb6c1ca79c7a0e22793818debbfb774da4187..d7782aa099439575a1a4f1887190b70a1b5e6370 100644 (file)
@@ -164,7 +164,7 @@ static inline unsigned char cmos_read_bank2(unsigned char addr)
 static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
 {
        outb(addr, RTC_PORT(2));
-       outb(val, RTC_PORT(2));
+       outb(val, RTC_PORT(3));
 }
 
 #else
index 2322c43af201bd23d0725e184c9bb5cced228996..d4457afcba8910892f85e90fa72309104fd73db5 100644 (file)
@@ -161,16 +161,6 @@ static struct platform_driver rtc_dm355evm_driver = {
        },
 };
 
-static int __init dm355evm_rtc_init(void)
-{
-       return platform_driver_register(&rtc_dm355evm_driver);
-}
-module_init(dm355evm_rtc_init);
-
-static void __exit dm355evm_rtc_exit(void)
-{
-       platform_driver_unregister(&rtc_dm355evm_driver);
-}
-module_exit(dm355evm_rtc_exit);
+module_platform_driver(rtc_dm355evm_driver);
 
 MODULE_LICENSE("GPL");
index 68e6caf2549662a66a84b7f94196a2c20ac2f93b..990c3ff489bf1fff947c854d747121b16c0de36c 100644 (file)
@@ -396,21 +396,10 @@ static struct platform_driver ds1286_platform_driver = {
        .remove         = __devexit_p(ds1286_remove),
 };
 
-static int __init ds1286_init(void)
-{
-       return platform_driver_register(&ds1286_platform_driver);
-}
-
-static void __exit ds1286_exit(void)
-{
-       platform_driver_unregister(&ds1286_platform_driver);
-}
+module_platform_driver(ds1286_platform_driver);
 
 MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
 MODULE_DESCRIPTION("DS1286 RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:rtc-ds1286");
-
-module_init(ds1286_init);
-module_exit(ds1286_exit);
index 586c244a05d854ab593b8310bd9e8a66991def14..761f36bc83a95e6bbbfa7dfc7f87aad55057858e 100644 (file)
@@ -580,20 +580,7 @@ static struct platform_driver ds1511_rtc_driver = {
        },
 };
 
- static int __init
-ds1511_rtc_init(void)
-{
-       return platform_driver_register(&ds1511_rtc_driver);
-}
-
- static void __exit
-ds1511_rtc_exit(void)
-{
-       platform_driver_unregister(&ds1511_rtc_driver);
-}
-
-module_init(ds1511_rtc_init);
-module_exit(ds1511_rtc_exit);
+module_platform_driver(ds1511_rtc_driver);
 
 MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
 MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
index 1350029044e6247247c3220efa362dd3bb832e63..6f0a1b530f2e8bdf8fea8ef2de1f3965114d74ed 100644 (file)
@@ -361,18 +361,7 @@ static struct platform_driver ds1553_rtc_driver = {
        },
 };
 
-static __init int ds1553_init(void)
-{
-       return platform_driver_register(&ds1553_rtc_driver);
-}
-
-static __exit void ds1553_exit(void)
-{
-       platform_driver_unregister(&ds1553_rtc_driver);
-}
-
-module_init(ds1553_init);
-module_exit(ds1553_exit);
+module_platform_driver(ds1553_rtc_driver);
 
 MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
 MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
index e3e0f92b60f0d97012f723e3a11a56b9054568d3..76112667c5078103c8bc4f64dd1b30201a5f1fde 100644 (file)
@@ -240,18 +240,7 @@ static struct platform_driver ds1742_rtc_driver = {
        },
 };
 
-static __init int ds1742_init(void)
-{
-       return platform_driver_register(&ds1742_rtc_driver);
-}
-
-static __exit void ds1742_exit(void)
-{
-       platform_driver_unregister(&ds1742_rtc_driver);
-}
-
-module_init(ds1742_init);
-module_exit(ds1742_exit);
+module_platform_driver(ds1742_rtc_driver);
 
 MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
 MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
index b6473631d18213e80ba404ec93b4ffe13d83e92d..05ab227eeff725aaa11761990fc520de9b71ce07 100644 (file)
@@ -345,7 +345,7 @@ static const struct dev_pm_ops jz4740_pm_ops = {
 #define JZ4740_RTC_PM_OPS NULL
 #endif  /* CONFIG_PM */
 
-struct platform_driver jz4740_rtc_driver = {
+static struct platform_driver jz4740_rtc_driver = {
        .probe   = jz4740_rtc_probe,
        .remove  = __devexit_p(jz4740_rtc_remove),
        .driver  = {
@@ -355,17 +355,7 @@ struct platform_driver jz4740_rtc_driver = {
        },
 };
 
-static int __init jz4740_rtc_init(void)
-{
-       return platform_driver_register(&jz4740_rtc_driver);
-}
-module_init(jz4740_rtc_init);
-
-static void __exit jz4740_rtc_exit(void)
-{
-       platform_driver_unregister(&jz4740_rtc_driver);
-}
-module_exit(jz4740_rtc_exit);
+module_platform_driver(jz4740_rtc_driver);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_LICENSE("GPL");
index ae16250c762f84ab7fd2958b0fa643fff1480c28..ecc1713b2b4f827f144e025d21ce8657d8a732d1 100644 (file)
@@ -396,17 +396,7 @@ static struct platform_driver lpc32xx_rtc_driver = {
        },
 };
 
-static int __init lpc32xx_rtc_init(void)
-{
-       return platform_driver_register(&lpc32xx_rtc_driver);
-}
-module_init(lpc32xx_rtc_init);
-
-static void __exit lpc32xx_rtc_exit(void)
-{
-       platform_driver_unregister(&lpc32xx_rtc_driver);
-}
-module_exit(lpc32xx_rtc_exit);
+module_platform_driver(lpc32xx_rtc_driver);
 
 MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
 MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
index 7317d3b9a3d54ffacdde1c40773eacf1309a075c..ef71132ff205fc1ef0faec00ba55cdea383622fa 100644 (file)
@@ -200,7 +200,6 @@ static int __devexit m41t93_remove(struct spi_device *spi)
 static struct spi_driver m41t93_driver = {
        .driver = {
                .name   = "rtc-m41t93",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe  = m41t93_probe,
index e259ed76ae856d5b2cc8f354522f39e70d5474dc..2a4721f617975bbb21db850418c5a41976fe5d89 100644 (file)
@@ -147,7 +147,6 @@ static int __devexit m41t94_remove(struct spi_device *spi)
 static struct spi_driver m41t94_driver = {
        .driver = {
                .name   = "rtc-m41t94",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe  = m41t94_probe,
index 8e2a24e33ed69f89530ab9996707c109c5d76816..f9e3b3583733a21a296a0a2ec32bbfe8daf4f699 100644 (file)
@@ -216,21 +216,10 @@ static struct platform_driver m48t35_platform_driver = {
        .remove         = __devexit_p(m48t35_remove),
 };
 
-static int __init m48t35_init(void)
-{
-       return platform_driver_register(&m48t35_platform_driver);
-}
-
-static void __exit m48t35_exit(void)
-{
-       platform_driver_unregister(&m48t35_platform_driver);
-}
+module_platform_driver(m48t35_platform_driver);
 
 MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
 MODULE_DESCRIPTION("M48T35 RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:rtc-m48t35");
-
-module_init(m48t35_init);
-module_exit(m48t35_exit);
index 28365388fb6c8665a96c9d7fb68638300d748dc0..30ebfec9fd2b84a6bdd819bc3a736bdea86218c7 100644 (file)
@@ -530,18 +530,7 @@ static struct platform_driver m48t59_rtc_driver = {
        .remove         = __devexit_p(m48t59_rtc_remove),
 };
 
-static int __init m48t59_rtc_init(void)
-{
-       return platform_driver_register(&m48t59_rtc_driver);
-}
-
-static void __exit m48t59_rtc_exit(void)
-{
-       platform_driver_unregister(&m48t59_rtc_driver);
-}
-
-module_init(m48t59_rtc_init);
-module_exit(m48t59_rtc_exit);
+module_platform_driver(m48t59_rtc_driver);
 
 MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
 MODULE_DESCRIPTION("M48T59/M48T02/M48T08 RTC driver");
index f981287d582b76a4a68a11246451e37b9a60c352..863fb3363aa6da503a5e0d66f1b1e7e853a97dca 100644 (file)
@@ -185,21 +185,10 @@ static struct platform_driver m48t86_rtc_platform_driver = {
        .remove         = __devexit_p(m48t86_rtc_remove),
 };
 
-static int __init m48t86_rtc_init(void)
-{
-       return platform_driver_register(&m48t86_rtc_platform_driver);
-}
-
-static void __exit m48t86_rtc_exit(void)
-{
-       platform_driver_unregister(&m48t86_rtc_platform_driver);
-}
+module_platform_driver(m48t86_rtc_platform_driver);
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
 MODULE_DESCRIPTION("M48T86 RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:rtc-m48t86");
-
-module_init(m48t86_rtc_init);
-module_exit(m48t86_rtc_exit);
index 0ec3f588a255dc1abe0cff5bc900cded2bdc8b59..1f6b3cc58e8a3644e1412f77808464412540c37a 100644 (file)
@@ -154,7 +154,6 @@ static int __devexit max6902_remove(struct spi_device *spi)
 static struct spi_driver max6902_driver = {
        .driver = {
                .name   = "rtc-max6902",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe  = max6902_probe,
index 3bc046f427e04c373465a0ac25f9125d0e89043f..4a5529346b47465528e1cddcce6bb9007d0ff4bd 100644 (file)
@@ -299,17 +299,7 @@ static struct platform_driver max8925_rtc_driver = {
        .remove         = __devexit_p(max8925_rtc_remove),
 };
 
-static int __init max8925_rtc_init(void)
-{
-       return platform_driver_register(&max8925_rtc_driver);
-}
-module_init(max8925_rtc_init);
-
-static void __exit max8925_rtc_exit(void)
-{
-       platform_driver_unregister(&max8925_rtc_driver);
-}
-module_exit(max8925_rtc_exit);
+module_platform_driver(max8925_rtc_driver);
 
 MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 2e48aa604273ad8e93f737888de136ec549467b5..7196f438c0895707233a4e9b0814c680e7821722 100644 (file)
@@ -327,17 +327,7 @@ static struct platform_driver max8998_rtc_driver = {
        .id_table       = max8998_rtc_id,
 };
 
-static int __init max8998_rtc_init(void)
-{
-       return platform_driver_register(&max8998_rtc_driver);
-}
-module_init(max8998_rtc_init);
-
-static void __exit max8998_rtc_exit(void)
-{
-       platform_driver_unregister(&max8998_rtc_driver);
-}
-module_exit(max8998_rtc_exit);
+module_platform_driver(max8998_rtc_driver);
 
 MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
 MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
index 9d0c3b478d558eb42279c6598e04f0d8e91a6695..546f6850bffbd9d0a9f884528b36d81296a08110 100644 (file)
@@ -399,7 +399,7 @@ static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
-const struct platform_device_id mc13xxx_rtc_idtable[] = {
+static const struct platform_device_id mc13xxx_rtc_idtable[] = {
        {
                .name = "mc13783-rtc",
        }, {
index da60915818b68cb526948e4e42111c89ded290ce..9d3caccfc250ff6c5dd8a89b5ee296457ec31bcb 100644 (file)
@@ -418,17 +418,7 @@ static struct platform_driver mpc5121_rtc_driver = {
        .remove = __devexit_p(mpc5121_rtc_remove),
 };
 
-static int __init mpc5121_rtc_init(void)
-{
-       return platform_driver_register(&mpc5121_rtc_driver);
-}
-module_init(mpc5121_rtc_init);
-
-static void __exit mpc5121_rtc_exit(void)
-{
-       platform_driver_unregister(&mpc5121_rtc_driver);
-}
-module_exit(mpc5121_rtc_exit);
+module_platform_driver(mpc5121_rtc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
index bb21f443fb7038719458d03c97e4b4eb00a225a1..6cd6c7235344916c1c51eac091f2cd80194c5426 100644 (file)
@@ -537,18 +537,7 @@ static struct platform_driver vrtc_mrst_platform_driver = {
        }
 };
 
-static int __init vrtc_mrst_init(void)
-{
-       return platform_driver_register(&vrtc_mrst_platform_driver);
-}
-
-static void __exit vrtc_mrst_exit(void)
-{
-       platform_driver_unregister(&vrtc_mrst_platform_driver);
-}
-
-module_init(vrtc_mrst_init);
-module_exit(vrtc_mrst_exit);
+module_platform_driver(vrtc_mrst_platform_driver);
 
 MODULE_AUTHOR("Jacob Pan; Feng Tang");
 MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
index 39e41fbdf08ba6de6259347feb0cc5dee76803d7..5e1d64ee52289b9e7a47990f8c7de0b41c7fccaf 100644 (file)
@@ -155,7 +155,6 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
 {
        struct rtc_time alarm_tm, now_tm;
        unsigned long now, time;
-       int ret;
        struct platform_device *pdev = to_platform_device(dev);
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
@@ -168,21 +167,33 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
        alarm_tm.tm_hour = alrm->tm_hour;
        alarm_tm.tm_min = alrm->tm_min;
        alarm_tm.tm_sec = alrm->tm_sec;
-       rtc_tm_to_time(&now_tm, &now);
        rtc_tm_to_time(&alarm_tm, &time);
 
-       if (time < now) {
-               time += 60 * 60 * 24;
-               rtc_time_to_tm(time, &alarm_tm);
-       }
-
-       ret = rtc_tm_to_time(&alarm_tm, &time);
-
        /* clear all the interrupt status bits */
        writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
        set_alarm_or_time(dev, MXC_RTC_ALARM, time);
 
-       return ret;
+       return 0;
+}
+
+static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
+                               unsigned int enabled)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+       void __iomem *ioaddr = pdata->ioaddr;
+       u32 reg;
+
+       spin_lock_irq(&pdata->rtc->irq_lock);
+       reg = readw(ioaddr + RTC_RTCIENR);
+
+       if (enabled)
+               reg |= bit;
+       else
+               reg &= ~bit;
+
+       writew(reg, ioaddr + RTC_RTCIENR);
+       spin_unlock_irq(&pdata->rtc->irq_lock);
 }
 
 /* This function is the RTC interrupt service routine. */
@@ -199,13 +210,12 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
 
-       /* clear alarm interrupt if it has occurred */
-       if (status & RTC_ALM_BIT)
-               status &= ~RTC_ALM_BIT;
-
        /* update irq data & counter */
-       if (status & RTC_ALM_BIT)
+       if (status & RTC_ALM_BIT) {
                events |= (RTC_AF | RTC_IRQF);
+               /* RTC alarm should be one-shot */
+               mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
+       }
 
        if (status & RTC_1HZ_BIT)
                events |= (RTC_UF | RTC_IRQF);
@@ -213,9 +223,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        if (status & PIT_ALL_ON)
                events |= (RTC_PF | RTC_IRQF);
 
-       if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm))
-               rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
-
        rtc_update_irq(pdata->rtc, 1, events);
        spin_unlock_irq(&pdata->rtc->irq_lock);
 
@@ -242,26 +249,6 @@ static void mxc_rtc_release(struct device *dev)
        spin_unlock_irq(&pdata->rtc->irq_lock);
 }
 
-static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
-                               unsigned int enabled)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-       void __iomem *ioaddr = pdata->ioaddr;
-       u32 reg;
-
-       spin_lock_irq(&pdata->rtc->irq_lock);
-       reg = readw(ioaddr + RTC_RTCIENR);
-
-       if (enabled)
-               reg |= bit;
-       else
-               reg &= ~bit;
-
-       writew(reg, ioaddr + RTC_RTCIENR);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
-}
-
 static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
        mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
@@ -290,6 +277,17 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
  */
 static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
 {
+       /*
+        * TTC_DAYR register is 9-bit in MX1 SoC, save time and day of year only
+        */
+       if (cpu_is_mx1()) {
+               struct rtc_time tm;
+
+               rtc_time_to_tm(time, &tm);
+               tm.tm_year = 70;
+               rtc_tm_to_time(&tm, &time);
+       }
+
        /* Avoid roll-over from reading the different registers */
        do {
                set_alarm_or_time(dev, MXC_RTC_TIME, time);
@@ -324,21 +322,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        int ret;
 
-       if (rtc_valid_tm(&alrm->time)) {
-               if (alrm->time.tm_sec > 59 ||
-                   alrm->time.tm_hour > 23 ||
-                   alrm->time.tm_min > 59)
-                       return -EINVAL;
-
-               ret = rtc_update_alarm(dev, &alrm->time);
-       } else {
-               ret = rtc_valid_tm(&alrm->time);
-               if (ret)
-                       return ret;
-
-               ret = rtc_update_alarm(dev, &alrm->time);
-       }
-
+       ret = rtc_update_alarm(dev, &alrm->time);
        if (ret)
                return ret;
 
@@ -424,6 +408,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
                pdata->irq = -1;
        }
 
+       if (pdata->irq >=0)
+               device_init_wakeup(&pdev->dev, 1);
+
        rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
                                  THIS_MODULE);
        if (IS_ERR(rtc)) {
@@ -459,9 +446,39 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int mxc_rtc_suspend(struct device *dev)
+{
+       struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(pdata->irq);
+
+       return 0;
+}
+
+static int mxc_rtc_resume(struct device *dev)
+{
+       struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(pdata->irq);
+
+       return 0;
+}
+
+static struct dev_pm_ops mxc_rtc_pm_ops = {
+       .suspend        = mxc_rtc_suspend,
+       .resume         = mxc_rtc_resume,
+};
+#endif
+
 static struct platform_driver mxc_rtc_driver = {
        .driver = {
                   .name        = "mxc_rtc",
+#ifdef CONFIG_PM
+                  .pm          = &mxc_rtc_pm_ops,
+#endif
                   .owner       = THIS_MODULE,
        },
        .remove         = __exit_p(mxc_rtc_remove),
index 2ee3bbf7e5ea03df488233c61b659c4909219335..b46c4004d8fe4e5728221857806c7878d14ba63c 100644 (file)
@@ -340,7 +340,6 @@ static int __devexit pcf2123_remove(struct spi_device *spi)
 static struct spi_driver pcf2123_driver = {
        .driver = {
                        .name   = "rtc-pcf2123",
-                       .bus    = &spi_bus_type,
                        .owner  = THIS_MODULE,
        },
        .probe  = pcf2123_probe,
index 0c423892923c99f7ed04a4d9a2d0e387de7e7b57..a20202f9ee577d5ec477630c4a8073b7ed8abee0 100644 (file)
@@ -294,17 +294,7 @@ static struct platform_driver pcf50633_rtc_driver = {
        .remove = __devexit_p(pcf50633_rtc_remove),
 };
 
-static int __init pcf50633_rtc_init(void)
-{
-       return platform_driver_register(&pcf50633_rtc_driver);
-}
-module_init(pcf50633_rtc_init);
-
-static void __exit pcf50633_rtc_exit(void)
-{
-       platform_driver_unregister(&pcf50633_rtc_driver);
-}
-module_exit(pcf50633_rtc_exit);
+module_platform_driver(pcf50633_rtc_driver);
 
 MODULE_DESCRIPTION("PCF50633 RTC driver");
 MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
index d420e9d877e85e9cb7a6d3ccd3b7a5391ef1c69c..9f1d6bcbdf6cf0ffc9403be3ea380e9f58d14774 100644 (file)
@@ -532,17 +532,7 @@ static struct platform_driver pm8xxx_rtc_driver = {
        },
 };
 
-static int __init pm8xxx_rtc_init(void)
-{
-       return platform_driver_register(&pm8xxx_rtc_driver);
-}
-module_init(pm8xxx_rtc_init);
-
-static void __exit pm8xxx_rtc_exit(void)
-{
-       platform_driver_unregister(&pm8xxx_rtc_driver);
-}
-module_exit(pm8xxx_rtc_exit);
+module_platform_driver(pm8xxx_rtc_driver);
 
 MODULE_ALIAS("platform:rtc-pm8xxx");
 MODULE_DESCRIPTION("PMIC8xxx RTC driver");
index e4b6880aabd05492b2a08c0511f400340b9fc0fa..ab0acaeb23719bfc055fe97cfb1efe7a051df756 100644 (file)
@@ -164,7 +164,7 @@ static int puv3_rtc_open(struct device *dev)
        int ret;
 
        ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
-                         IRQF_DISABLED,  "pkunity-rtc alarm", rtc_dev);
+                       0, "pkunity-rtc alarm", rtc_dev);
 
        if (ret) {
                dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
@@ -172,7 +172,7 @@ static int puv3_rtc_open(struct device *dev)
        }
 
        ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
-                         IRQF_DISABLED,  "pkunity-rtc tick", rtc_dev);
+                       0, "pkunity-rtc tick", rtc_dev);
 
        if (ret) {
                dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
@@ -326,7 +326,7 @@ static int puv3_rtc_resume(struct platform_device *pdev)
 #define puv3_rtc_resume  NULL
 #endif
 
-static struct platform_driver puv3_rtcdrv = {
+static struct platform_driver puv3_rtc_driver = {
        .probe          = puv3_rtc_probe,
        .remove         = __devexit_p(puv3_rtc_remove),
        .suspend        = puv3_rtc_suspend,
@@ -337,21 +337,7 @@ static struct platform_driver puv3_rtcdrv = {
        }
 };
 
-static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
-
-static int __init puv3_rtc_init(void)
-{
-       printk(banner);
-       return platform_driver_register(&puv3_rtcdrv);
-}
-
-static void __exit puv3_rtc_exit(void)
-{
-       platform_driver_unregister(&puv3_rtcdrv);
-}
-
-module_init(puv3_rtc_init);
-module_exit(puv3_rtc_exit);
+module_platform_driver(puv3_rtc_driver);
 
 MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
 MODULE_AUTHOR("Hu Dongliang");
index 971bc8e08da66008fecd1d16f29508bafc40d482..ce2ca8523ddd5d01146c1b17c64069e76dbc0229 100644 (file)
@@ -229,7 +229,6 @@ static int __devexit rs5c348_remove(struct spi_device *spi)
 static struct spi_driver rs5c348_driver = {
        .driver = {
                .name   = "rtc-rs5c348",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe  = rs5c348_probe,
index 175067a17c46f31ccc7af5382859f0cfa0ec9e64..aef40bd2957be7ccb2fd5adfebb6c6e13c3b6683 100644 (file)
@@ -673,21 +673,7 @@ static struct platform_driver s3c_rtc_driver = {
        },
 };
 
-static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";
-
-static int __init s3c_rtc_init(void)
-{
-       printk(banner);
-       return platform_driver_register(&s3c_rtc_driver);
-}
-
-static void __exit s3c_rtc_exit(void)
-{
-       platform_driver_unregister(&s3c_rtc_driver);
-}
-
-module_init(s3c_rtc_init);
-module_exit(s3c_rtc_exit);
+module_platform_driver(s3c_rtc_driver);
 
 MODULE_DESCRIPTION("Samsung S3C RTC Driver");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
index fc1ffe97fca191c0952740aadd8e80b4f7a2ded9..4595d3e645a7358676b5409b8ced918f306e07c2 100644 (file)
@@ -435,18 +435,7 @@ static struct platform_driver sa1100_rtc_driver = {
        },
 };
 
-static int __init sa1100_rtc_init(void)
-{
-       return platform_driver_register(&sa1100_rtc_driver);
-}
-
-static void __exit sa1100_rtc_exit(void)
-{
-       platform_driver_unregister(&sa1100_rtc_driver);
-}
-
-module_init(sa1100_rtc_init);
-module_exit(sa1100_rtc_exit);
+module_platform_driver(sa1100_rtc_driver);
 
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
 MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
index 893bac2bb21b61b6f5b004a76e08316a2174366e..19a28a671a8e74f8abbd5ed149f4aebc4769b75d 100644 (file)
@@ -516,17 +516,7 @@ static struct platform_driver spear_rtc_driver = {
        },
 };
 
-static int __init rtc_init(void)
-{
-       return platform_driver_register(&spear_rtc_driver);
-}
-module_init(rtc_init);
-
-static void __exit rtc_exit(void)
-{
-       platform_driver_unregister(&spear_rtc_driver);
-}
-module_exit(rtc_exit);
+module_platform_driver(spear_rtc_driver);
 
 MODULE_ALIAS("platform:rtc-spear");
 MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
index ed3e9b5990315cc2389208768ab38b2332735fe1..7621116bd20d25680a266914c66ab7527bb27da1 100644 (file)
@@ -370,18 +370,7 @@ static struct platform_driver stk17ta8_rtc_driver = {
        },
 };
 
-static __init int stk17ta8_init(void)
-{
-       return platform_driver_register(&stk17ta8_rtc_driver);
-}
-
-static __exit void stk17ta8_exit(void)
-{
-       platform_driver_unregister(&stk17ta8_rtc_driver);
-}
-
-module_init(stk17ta8_init);
-module_exit(stk17ta8_exit);
+module_platform_driver(stk17ta8_rtc_driver);
 
 MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
 MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
index 7315068daa59797b9372dbe871784ef12e5ea8c1..10287865e33012a154100b6b674f541273d132d3 100644 (file)
@@ -276,18 +276,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
        },
 };
 
-static int __init stmp3xxx_rtc_init(void)
-{
-       return platform_driver_register(&stmp3xxx_rtcdrv);
-}
-
-static void __exit stmp3xxx_rtc_exit(void)
-{
-       platform_driver_unregister(&stmp3xxx_rtcdrv);
-}
-
-module_init(stmp3xxx_rtc_init);
-module_exit(stmp3xxx_rtc_exit);
+module_platform_driver(stmp3xxx_rtcdrv);
 
 MODULE_DESCRIPTION("STMP3xxx RTC Driver");
 MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
index 20687d55e7a72d5eb2871fd8504d6a2fa457e9e5..d43b4f6eb4e420c79327566e3444351cf86c8185 100644 (file)
@@ -550,6 +550,11 @@ static int twl_rtc_resume(struct platform_device *pdev)
 #define twl_rtc_resume  NULL
 #endif
 
+static const struct of_device_id twl_rtc_of_match[] = {
+       {.compatible = "ti,twl4030-rtc", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
 MODULE_ALIAS("platform:twl_rtc");
 
 static struct platform_driver twl4030rtc_driver = {
@@ -559,8 +564,9 @@ static struct platform_driver twl4030rtc_driver = {
        .suspend        = twl_rtc_suspend,
        .resume         = twl_rtc_resume,
        .driver         = {
-               .owner  = THIS_MODULE,
-               .name   = "twl_rtc",
+               .owner          = THIS_MODULE,
+               .name           = "twl_rtc",
+               .of_match_table = twl_rtc_of_match,
        },
 };
 
index f71c3ce180369225938f8ba21a6960e605f9d251..bca5d677bc85385d31efcd5486a4f44ac08bed73 100644 (file)
@@ -393,18 +393,7 @@ static struct platform_driver rtc_device_driver = {
        },
 };
 
-static __init int v3020_init(void)
-{
-       return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void v3020_exit(void)
-{
-       platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(v3020_init);
-module_exit(v3020_exit);
+module_platform_driver(rtc_device_driver);
 
 MODULE_DESCRIPTION("V3020 RTC");
 MODULE_AUTHOR("Raphael Assenat");
index c5698cda366a910a6f16008248c3c78aac62823f..fcbfdda2993bea5e8dd2f2ca4a1dd74e3e5ff121 100644 (file)
@@ -405,15 +405,4 @@ static struct platform_driver rtc_platform_driver = {
        },
 };
 
-static int __init vr41xx_rtc_init(void)
-{
-       return platform_driver_register(&rtc_platform_driver);
-}
-
-static void __exit vr41xx_rtc_exit(void)
-{
-       platform_driver_unregister(&rtc_platform_driver);
-}
-
-module_init(vr41xx_rtc_init);
-module_exit(vr41xx_rtc_exit);
+module_platform_driver(rtc_platform_driver);
index f93f412423c6d4eaadcdfbe5412d4a65c80cc09e..9e94fb147c26afcffe92ad7175cb011051ca5fb8 100644 (file)
@@ -311,17 +311,7 @@ static struct platform_driver vt8500_rtc_driver = {
        },
 };
 
-static int __init vt8500_rtc_init(void)
-{
-       return platform_driver_register(&vt8500_rtc_driver);
-}
-module_init(vt8500_rtc_init);
-
-static void __exit vt8500_rtc_exit(void)
-{
-       platform_driver_unregister(&vt8500_rtc_driver);
-}
-module_exit(vt8500_rtc_exit);
+module_platform_driver(vt8500_rtc_driver);
 
 MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
 MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
index bdc909bd56da0f4c7c615349df6f43023bd52517..3b6e6a67e765b34e7efd7e5cb82eea63a930d31e 100644 (file)
@@ -324,15 +324,6 @@ static irqreturn_t wm831x_alm_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t wm831x_per_irq(int irq, void *data)
-{
-       struct wm831x_rtc *wm831x_rtc = data;
-
-       rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_UF);
-
-       return IRQ_HANDLED;
-}
-
 static const struct rtc_class_ops wm831x_rtc_ops = {
        .read_time = wm831x_rtc_readtime,
        .set_mmss = wm831x_rtc_set_mmss,
@@ -405,11 +396,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
        struct wm831x_rtc *wm831x_rtc;
-       int per_irq = platform_get_irq_byname(pdev, "PER");
        int alm_irq = platform_get_irq_byname(pdev, "ALM");
        int ret = 0;
 
-       wm831x_rtc = kzalloc(sizeof(*wm831x_rtc), GFP_KERNEL);
+       wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
        if (wm831x_rtc == NULL)
                return -ENOMEM;
 
@@ -433,14 +423,6 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
-                                  IRQF_TRIGGER_RISING, "RTC period",
-                                  wm831x_rtc);
-       if (ret != 0) {
-               dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
-                       per_irq, ret);
-       }
-
        ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
                                   IRQF_TRIGGER_RISING, "RTC alarm",
                                   wm831x_rtc);
@@ -452,20 +434,16 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
        return 0;
 
 err:
-       kfree(wm831x_rtc);
        return ret;
 }
 
 static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
 {
        struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
-       int per_irq = platform_get_irq_byname(pdev, "PER");
        int alm_irq = platform_get_irq_byname(pdev, "ALM");
 
        free_irq(alm_irq, wm831x_rtc);
-       free_irq(per_irq, wm831x_rtc);
        rtc_device_unregister(wm831x_rtc->rtc);
-       kfree(wm831x_rtc);
 
        return 0;
 }
@@ -490,17 +468,7 @@ static struct platform_driver wm831x_rtc_driver = {
        },
 };
 
-static int __init wm831x_rtc_init(void)
-{
-       return platform_driver_register(&wm831x_rtc_driver);
-}
-module_init(wm831x_rtc_init);
-
-static void __exit wm831x_rtc_exit(void)
-{
-       platform_driver_unregister(&wm831x_rtc_driver);
-}
-module_exit(wm831x_rtc_exit);
+module_platform_driver(wm831x_rtc_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
index 66421426e404521c5ecafda56c49e0e34df594f9..c2e52d15abb295f0de5b0ae08afebf62c77ad009 100644 (file)
@@ -486,17 +486,7 @@ static struct platform_driver wm8350_rtc_driver = {
        },
 };
 
-static int __init wm8350_rtc_init(void)
-{
-       return platform_driver_register(&wm8350_rtc_driver);
-}
-module_init(wm8350_rtc_init);
-
-static void __exit wm8350_rtc_exit(void)
-{
-       platform_driver_unregister(&wm8350_rtc_driver);
-}
-module_exit(wm8350_rtc_exit);
+module_platform_driver(wm8350_rtc_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("RTC driver for the WM8350");
index 797a43994b552cf945d61d6654eec298dcff2b0f..375756fa95cfbe5a5394ed9eb2b958b51460d51a 100644 (file)
@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
        struct be_status_bhs *sts_bhs =
                                (struct be_status_bhs *)io_task->cmd_bhs;
        struct iscsi_conn *conn = beiscsi_conn->conn;
-       unsigned int sense_len;
        unsigned char *sense;
        u32 resid = 0, exp_cmdsn, max_cmdsn;
        u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
        }
 
        if (status == SAM_STAT_CHECK_CONDITION) {
+               u16 sense_len;
                unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
                sense = sts_bhs->sense_info + sizeof(unsigned short);
-               sense_len =  cpu_to_be16(*slen);
+               sense_len = be16_to_cpu(*slen);
                memcpy(task->sc->sense_buffer, sense,
                       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
        }
index 7b3d235d20b4638fb62d270b5a5a37980230055e..b5a1595cc0a523f6e71e8b1c8c438257c5897f70 100644 (file)
@@ -902,7 +902,7 @@ struct sfp_mem_s {
 union sfp_xcvr_e10g_code_u {
        u8              b;
        struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
                u8      e10g_unall:1;   /* 10G Ethernet compliance */
                u8      e10g_lrm:1;
                u8      e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
 union sfp_xcvr_fc3_code_u {
        u8              b;
        struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
                u8      rsv4:1;
                u8      mb800:1;    /* 800 Mbytes/sec */
                u8      mb1600:1;   /* 1600 Mbytes/sec */
index 863c6ba7d5eb0c817bfa7f3975a9d3de981cc8be..78963be2c4fb308f09df6f38b3cf5097b3bb5f53 100644 (file)
 struct bfa_iocfc_intr_attr_s {
        u8              coalesce;       /*  enable/disable coalescing */
        u8              rsvd[3];
-       __be16  latency;        /*  latency in microseconds   */
-       __be16  delay;          /*  delay in microseconds     */
+       __be16          latency;        /*  latency in microseconds   */
+       __be16          delay;          /*  delay in microseconds     */
 };
 
 /*
  * IOC firmware configuraton
  */
 struct bfa_iocfc_fwcfg_s {
-       u16        num_fabrics; /*  number of fabrics           */
-       u16        num_lports;  /*  number of local lports      */
-       u16        num_rports;  /*  number of remote ports      */
-       u16        num_ioim_reqs;       /*  number of IO reqs           */
-       u16        num_tskim_reqs;      /*  task management requests    */
-       u16        num_fwtio_reqs;      /* number of TM IO reqs in FW */
-       u16        num_fcxp_reqs;       /*  unassisted FC exchanges     */
-       u16        num_uf_bufs; /*  unsolicited recv buffers    */
+       u16             num_fabrics;    /*  number of fabrics           */
+       u16             num_lports;     /*  number of local lports      */
+       u16             num_rports;     /*  number of remote ports      */
+       u16             num_ioim_reqs;  /*  number of IO reqs           */
+       u16             num_tskim_reqs; /*  task management requests    */
+       u16             num_fwtio_reqs; /* number of TM IO reqs in FW   */
+       u16             num_fcxp_reqs;  /*  unassisted FC exchanges     */
+       u16             num_uf_bufs;    /*  unsolicited recv buffers    */
        u8              num_cqs;
        u8              fw_tick_res;    /*  FW clock resolution in ms */
        u8              rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
 #pragma pack()
 
 struct bfa_iocfc_drvcfg_s {
-       u16        num_reqq_elems;      /*  number of req queue elements */
-       u16        num_rspq_elems;      /*  number of rsp queue elements */
-       u16        num_sgpgs;   /*  number of total SG pages      */
-       u16        num_sboot_tgts;      /*  number of SAN boot targets    */
-       u16        num_sboot_luns;      /*  number of SAN boot luns       */
-       u16         ioc_recover;        /*  IOC recovery mode             */
-       u16         min_cfg;    /*  minimum configuration         */
-       u16        path_tov;    /*  device path timeout   */
-       u16             num_tio_reqs;   /*!< number of TM IO reqs       */
+       u16             num_reqq_elems; /*  number of req queue elements */
+       u16             num_rspq_elems; /*  number of rsp queue elements */
+       u16             num_sgpgs;      /*  number of total SG pages     */
+       u16             num_sboot_tgts; /*  number of SAN boot targets   */
+       u16             num_sboot_luns; /*  number of SAN boot luns      */
+       u16             ioc_recover;    /*  IOC recovery mode            */
+       u16             min_cfg;        /*  minimum configuration        */
+       u16             path_tov;       /*  device path timeout         */
+       u16             num_tio_reqs;   /* number of TM IO reqs */
        u8              port_mode;
        u8              rsvd_a;
-       bfa_boolean_t   delay_comp; /*  delay completion of
-                                                       failed inflight IOs */
+       bfa_boolean_t   delay_comp;     /* delay completion of failed
+                                        * inflight IOs */
        u16             num_ttsk_reqs;   /* TM task management requests */
        u32             rsvd;
 };
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
        u32     fw_frm_drop;            /*  f/w drop the frame */
 
        u32     rec_timeout;            /*  FW rec timed out */
-       u32     error_rec;                      /*  FW sending rec on
-                                                       * an error condition*/
+       u32     error_rec;              /*  FW sending rec on
+                                         an error condition*/
        u32     wait_for_si;            /*  FW wait for SI */
        u32     rec_rsp_inval;          /*  REC rsp invalid */
        u32     seqr_io_abort;          /*  target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
        u32     unexp_fcp_rsp;          /*  fcp response in wrong state */
 
        u32     fcp_rsp_under_run;      /*  fcp rsp IO underrun */
-       u32        fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
+       u32     fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
        u32     fcp_rsp_under_run_err;  /*  fcp rsp IO underrun error */
-       u32        fcp_rsp_resid_inval;    /*  invalid residue */
+       u32     fcp_rsp_resid_inval;    /*  invalid residue */
        u32     fcp_rsp_over_run;       /*  fcp rsp IO overrun */
        u32     fcp_rsp_over_run_err;   /*  fcp rsp IO overrun error */
        u32     fcp_rsp_proto_err;      /*  protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
        u32     ioh_hit_class2_event;   /*  IOH hit class2 */
        u32     ioh_miss_other_event;   /*  IOH miss other */
        u32     ioh_seq_cnt_err_event;  /*  IOH seq cnt error */
-       u32     ioh_len_err_event;              /*  IOH len error - fcp_dl !=
-                                                       * bytes xfered */
+       u32     ioh_len_err_event;      /*  IOH len error - fcp_dl !=
+                                         bytes xfered */
        u32     ioh_seq_len_err_event;  /*  IOH seq len error */
        u32     ioh_data_oor_event;     /*  Data out of range */
        u32     ioh_ro_ooo_event;       /*  Relative offset out of range */
        u32     ioh_cpu_owned_event;    /*  IOH hit -iost owned by f/w */
        u32     ioh_unexp_frame_event;  /*  unexpected frame received
-                                                *   count */
+                                        *  count */
        u32     ioh_err_int;            /*  IOH error int during data-phase
-                                                *   for scsi write
-                                                */
+                                        *  for scsi write */
 };
 
 struct bfa_fw_tio_stats_s {
-       u32     tio_conf_proc;  /* TIO CONF processed */
+       u32     tio_conf_proc;      /* TIO CONF processed */
        u32     tio_conf_drop;      /* TIO CONF dropped */
        u32     tio_cleanup_req;    /* TIO cleanup requested */
        u32     tio_cleanup_comp;   /* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
        u32     tio_abort_rsp_comp; /* TIO abort rsp completed */
        u32     tio_abts_req;       /* TIO ABTS requested */
        u32     tio_abts_ack;       /* TIO ABTS ack-ed */
-       u32     tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+       u32     tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
        u32     tio_abts_tmo;       /* TIO ABTS timeout */
        u32     tio_snsdata_dma;    /* TIO sense data DMA */
-       u32     tio_rxwchan_wait; /* TIO waiting for RX wait channel */
-       u32     tio_rxwchan_avail; /* TIO RX wait channel available */
+       u32     tio_rxwchan_wait;   /* TIO waiting for RX wait channel */
+       u32     tio_rxwchan_avail;  /* TIO RX wait channel available */
        u32     tio_hit_bls;        /* TIO IOH BLS event */
        u32     tio_uf_recv;        /* TIO received UF */
-       u32     tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
-       u32     tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+       u32     tio_rd_invalid_sm;  /* TIO read reqst in wrong state machine */
+       u32     tio_wr_invalid_sm;  /* TIO write reqst in wrong state machine */
 
-       u32     ds_rxwchan_wait; /* DS waiting for RX wait channel */
-       u32     ds_rxwchan_avail; /* DS RX wait channel available */
+       u32     ds_rxwchan_wait;    /* DS waiting for RX wait channel */
+       u32     ds_rxwchan_avail;   /* DS RX wait channel available */
        u32     ds_unaligned_rd;    /* DS unaligned read */
-       u32     ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
-       u32     ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+       u32     ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+                                      * machine */
+       u32     ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+                                      * machine */
        u32     ds_flush_req;       /* DS flush requested */
        u32     ds_flush_comp;      /* DS flush completed */
        u32     ds_xfrdy_exp;       /* DS XFER_RDY expired */
        u32     ds_seq_cnt_err;     /* DS seq cnt error */
        u32     ds_seq_len_err;     /* DS seq len error */
        u32     ds_data_oor;        /* DS data out of order */
-       u32     ds_hit_bls;     /* DS hit BLS */
+       u32     ds_hit_bls;         /* DS hit BLS */
        u32     ds_edtov_timer_exp; /* DS edtov expired */
        u32     ds_cpu_owned;       /* DS cpu owned */
        u32     ds_hit_class2;      /* DS hit class2 */
        u32     ds_length_err;      /* DS length error */
        u32     ds_ro_ooo_err;      /* DS relative offset out-of-order error */
-       u32     ds_rectov_timer_exp;    /* DS rectov expired */
+       u32     ds_rectov_timer_exp;/* DS rectov expired */
        u32     ds_unexp_fr_err;    /* DS unexp frame error */
 };
 
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
  */
 
 struct bfa_fw_port_fpg_stats_s {
-    u32    intr_evt;
-    u32    intr;
-    u32    intr_excess;
-    u32    intr_cause0;
-    u32    intr_other;
-    u32    intr_other_ign;
-    u32    sig_lost;
-    u32    sig_regained;
-    u32    sync_lost;
-    u32    sync_to;
-    u32    sync_regained;
-    u32    div2_overflow;
-    u32    div2_underflow;
-    u32    efifo_overflow;
-    u32    efifo_underflow;
-    u32    idle_rx;
-    u32    lrr_rx;
-    u32    lr_rx;
-    u32    ols_rx;
-    u32    nos_rx;
-    u32    lip_rx;
-    u32    arbf0_rx;
-    u32    arb_rx;
-    u32    mrk_rx;
-    u32    const_mrk_rx;
-    u32    prim_unknown;
+       u32    intr_evt;
+       u32    intr;
+       u32    intr_excess;
+       u32    intr_cause0;
+       u32    intr_other;
+       u32    intr_other_ign;
+       u32    sig_lost;
+       u32    sig_regained;
+       u32    sync_lost;
+       u32    sync_to;
+       u32    sync_regained;
+       u32    div2_overflow;
+       u32    div2_underflow;
+       u32    efifo_overflow;
+       u32    efifo_underflow;
+       u32    idle_rx;
+       u32    lrr_rx;
+       u32    lr_rx;
+       u32    ols_rx;
+       u32    nos_rx;
+       u32    lip_rx;
+       u32    arbf0_rx;
+       u32    arb_rx;
+       u32    mrk_rx;
+       u32    const_mrk_rx;
+       u32    prim_unknown;
 };
 
 
 struct bfa_fw_port_lksm_stats_s {
-    u32    hwsm_success;       /*  hwsm state machine success          */
-    u32    hwsm_fails;         /*  hwsm fails                          */
-    u32    hwsm_wdtov;         /*  hwsm timed out                      */
-    u32    swsm_success;       /*  swsm success                        */
-    u32    swsm_fails;         /*  swsm fails                          */
-    u32    swsm_wdtov;         /*  swsm timed out                      */
-    u32    busybufs;           /*  link init failed due to busybuf     */
-    u32    buf_waits;          /*  bufwait state entries               */
-    u32    link_fails;         /*  link failures                       */
-    u32    psp_errors;         /*  primitive sequence protocol errors  */
-    u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
-    u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
-    u32    lr_tx;              /*  No. of times LR tx started          */
-    u32    lrr_tx;             /*  No. of times LRR tx started         */
-    u32    ols_tx;             /*  No. of times OLS tx started         */
-    u32    nos_tx;             /*  No. of times NOS tx started         */
-    u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
-    u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
-       u32     bbsc_lr;        /* LKSM LR tx for credit recovery       */
+       u32    hwsm_success;       /*  hwsm state machine success          */
+       u32    hwsm_fails;         /*  hwsm fails                          */
+       u32    hwsm_wdtov;         /*  hwsm timed out                      */
+       u32    swsm_success;       /*  swsm success                        */
+       u32    swsm_fails;         /*  swsm fails                          */
+       u32    swsm_wdtov;         /*  swsm timed out                      */
+       u32    busybufs;           /*  link init failed due to busybuf     */
+       u32    buf_waits;          /*  bufwait state entries               */
+       u32    link_fails;         /*  link failures                       */
+       u32    psp_errors;         /*  primitive sequence protocol errors  */
+       u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
+       u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
+       u32    lr_tx;              /*  No. of times LR tx started          */
+       u32    lrr_tx;             /*  No. of times LRR tx started         */
+       u32    ols_tx;             /*  No. of times OLS tx started         */
+       u32    nos_tx;             /*  No. of times NOS tx started         */
+       u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
+       u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM       */
+       u32    bbsc_lr;            /* LKSM LR tx for credit recovery       */
 };
 
 struct bfa_fw_port_snsm_stats_s {
-    u32    hwsm_success;       /*  Successful hwsm terminations        */
-    u32    hwsm_fails;         /*  hwsm fail count                     */
-    u32    hwsm_wdtov;         /*  hwsm timed out                      */
-    u32    swsm_success;       /*  swsm success                        */
-    u32    swsm_wdtov;         /*  swsm timed out                      */
-    u32    error_resets;       /*  error resets initiated by upsm      */
-    u32    sync_lost;          /*  Sync loss count                     */
-    u32    sig_lost;           /*  Signal loss count                   */
-       u32     asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
+       u32    hwsm_success;       /*  Successful hwsm terminations        */
+       u32    hwsm_fails;         /*  hwsm fail count                     */
+       u32    hwsm_wdtov;         /*  hwsm timed out                      */
+       u32    swsm_success;       /*  swsm success                        */
+       u32    swsm_wdtov;         /*  swsm timed out                      */
+       u32    error_resets;       /*  error resets initiated by upsm      */
+       u32    sync_lost;          /*  Sync loss count                     */
+       u32    sig_lost;           /*  Signal loss count                   */
+       u32    asn8g_attempts;     /* SNSM HWSM at 8Gbps attempts          */
 };
 
 struct bfa_fw_port_physm_stats_s {
-    u32    module_inserts;     /*  Module insert count                 */
-    u32    module_xtracts;     /*  Module extracts count               */
-    u32    module_invalids;    /*  Invalid module inserted count       */
-    u32    module_read_ign;    /*  Module validation status ignored    */
-    u32    laser_faults;       /*  Laser fault count                   */
-    u32    rsvd;
+       u32    module_inserts;     /*  Module insert count                 */
+       u32    module_xtracts;     /*  Module extracts count               */
+       u32    module_invalids;    /*  Invalid module inserted count       */
+       u32    module_read_ign;    /*  Module validation status ignored    */
+       u32    laser_faults;       /*  Laser fault count                   */
+       u32    rsvd;
 };
 
 struct bfa_fw_fip_stats_s {
-    u32    vlan_req;           /*  vlan discovery requests             */
-    u32    vlan_notify;        /*  vlan notifications                  */
-    u32    vlan_err;           /*  vlan response error                 */
-    u32    vlan_timeouts;      /*  vlan disvoery timeouts              */
-    u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
-    u32    disc_req;           /*  Discovery solicit requests          */
-    u32    disc_rsp;           /*  Discovery solicit response          */
-    u32    disc_err;           /*  Discovery advt. parse errors        */
-    u32    disc_unsol;         /*  Discovery unsolicited               */
-    u32    disc_timeouts;      /*  Discovery timeouts                  */
-    u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
-    u32    linksvc_unsupp;     /*  Unsupported link service req        */
-    u32    linksvc_err;        /*  Parse error in link service req     */
-    u32    logo_req;           /*  FIP logos received                  */
-    u32    clrvlink_req;       /*  Clear virtual link req              */
-    u32    op_unsupp;          /*  Unsupported FIP operation           */
-    u32    untagged;           /*  Untagged frames (ignored)           */
-    u32    invalid_version;    /*  Invalid FIP version                 */
+       u32    vlan_req;           /*  vlan discovery requests             */
+       u32    vlan_notify;        /*  vlan notifications                  */
+       u32    vlan_err;           /*  vlan response error                 */
+       u32    vlan_timeouts;      /*  vlan disvoery timeouts              */
+       u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
+       u32    disc_req;           /*  Discovery solicit requests          */
+       u32    disc_rsp;           /*  Discovery solicit response          */
+       u32    disc_err;           /*  Discovery advt. parse errors        */
+       u32    disc_unsol;         /*  Discovery unsolicited               */
+       u32    disc_timeouts;      /*  Discovery timeouts                  */
+       u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
+       u32    linksvc_unsupp;     /*  Unsupported link service req        */
+       u32    linksvc_err;        /*  Parse error in link service req     */
+       u32    logo_req;           /*  FIP logos received                  */
+       u32    clrvlink_req;       /*  Clear virtual link req              */
+       u32    op_unsupp;          /*  Unsupported FIP operation           */
+       u32    untagged;           /*  Untagged frames (ignored)           */
+       u32    invalid_version;    /*  Invalid FIP version                 */
 };
 
 struct bfa_fw_lps_stats_s {
-    u32    mac_invalids;       /*  Invalid mac assigned                */
-    u32    rsvd;
+       u32    mac_invalids;       /*  Invalid mac assigned                */
+       u32    rsvd;
 };
 
 struct bfa_fw_fcoe_stats_s {
-    u32    cee_linkups;        /*  CEE link up count                   */
-    u32    cee_linkdns;        /*  CEE link down count                 */
-    u32    fip_linkups;        /*  FIP link up count                   */
-    u32    fip_linkdns;        /*  FIP link up count                   */
-    u32    fip_fails;          /*  FIP fail count                      */
-    u32    mac_invalids;       /*  Invalid mac assigned                */
+       u32    cee_linkups;        /*  CEE link up count                   */
+       u32    cee_linkdns;        /*  CEE link down count                 */
+       u32    fip_linkups;        /*  FIP link up count                   */
+       u32    fip_linkdns;        /*  FIP link up count                   */
+       u32    fip_fails;          /*  FIP fail count                      */
+       u32    mac_invalids;       /*  Invalid mac assigned                */
 };
 
 /*
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
-       struct bfa_fw_fcoe_stats_s  fcoe_stats;
-       struct bfa_fw_fip_stats_s   fip_stats;
+       struct bfa_fw_fcoe_stats_s              fcoe_stats;
+       struct bfa_fw_fip_stats_s               fip_stats;
 };
 
 /*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
  * IOC firmware FC port stats
  */
 union bfa_fw_fc_port_stats_s {
-       struct bfa_fw_fc_uport_stats_s  fc_stats;
-       struct bfa_fw_fcoe_port_stats_s fcoe_stats;
+       struct bfa_fw_fc_uport_stats_s          fc_stats;
+       struct bfa_fw_fcoe_port_stats_s         fcoe_stats;
 };
 
 /*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
  */
 struct bfa_fw_trunk_stats_s {
        u32 emt_recvd;          /*  Trunk EMT received          */
-       u32 emt_accepted;               /*  Trunk EMT Accepted          */
-       u32 emt_rejected;               /*  Trunk EMT rejected          */
+       u32 emt_accepted;       /*  Trunk EMT Accepted          */
+       u32 emt_rejected;       /*  Trunk EMT rejected          */
        u32 etp_recvd;          /*  Trunk ETP received          */
-       u32 etp_accepted;               /*  Trunk ETP Accepted          */
-       u32 etp_rejected;               /*  Trunk ETP rejected          */
+       u32 etp_accepted;       /*  Trunk ETP Accepted          */
+       u32 etp_rejected;       /*  Trunk ETP rejected          */
        u32 lr_recvd;           /*  Trunk LR received           */
-       u32 rsvd;                       /*  padding for 64 bit alignment */
+       u32 rsvd;               /*  padding for 64 bit alignment */
 };
 
 struct bfa_fw_advsm_stats_s {
        u32 flogi_sent;         /*  Flogi sent                  */
        u32 flogi_acc_recvd;    /*  Flogi Acc received          */
        u32 flogi_rjt_recvd;    /*  Flogi rejects received      */
-       u32 flogi_retries;              /*  Flogi retries               */
+       u32 flogi_retries;      /*  Flogi retries               */
 
        u32 elp_recvd;          /*  ELP received                */
-       u32 elp_accepted;               /*  ELP Accepted                */
-       u32 elp_rejected;               /*  ELP rejected                */
-       u32 elp_dropped;                /*  ELP dropped         */
+       u32 elp_accepted;       /*  ELP Accepted                */
+       u32 elp_rejected;       /*  ELP rejected                */
+       u32 elp_dropped;        /*  ELP dropped                 */
 };
 
 /*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
        u16  total_vc_count;                    /*  Total VC Count */
        u16  shared_credit;
        u32  elp_opmode_flags;
-       struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /*   as many as
+       struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /* as many as
                                                            * total_vc_count */
 };
 
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
 struct bfa_qos_stats_s {
        u32     flogi_sent;             /*  QoS Flogi sent */
        u32     flogi_acc_recvd;        /*  QoS Flogi Acc received */
-       u32     flogi_rjt_recvd; /*  QoS Flogi rejects received */
+       u32     flogi_rjt_recvd;        /*  QoS Flogi rejects received */
        u32     flogi_retries;          /*  QoS Flogi retries */
 
        u32     elp_recvd;              /*  QoS ELP received */
        u32     elp_accepted;           /*  QoS ELP Accepted */
-       u32     elp_rejected;       /*  QoS ELP rejected */
-       u32     elp_dropped;        /*  QoS ELP dropped  */
+       u32     elp_rejected;           /*  QoS ELP rejected */
+       u32     elp_dropped;            /*  QoS ELP dropped  */
 
-       u32     qos_rscn_recvd;     /*  QoS RSCN received */
-       u32     rsvd;               /* padding for 64 bit alignment */
+       u32     qos_rscn_recvd;         /*  QoS RSCN received */
+       u32     rsvd;                   /* padding for 64 bit alignment */
 };
 
 /*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
  */
 struct bfa_fcoe_stats_s {
        u64     secs_reset;     /*  Seconds since stats reset        */
-       u64     cee_linkups;    /*  CEE link up              */
+       u64     cee_linkups;    /*  CEE link up                      */
        u64     cee_linkdns;    /*  CEE link down                    */
-       u64     fip_linkups;    /*  FIP link up              */
+       u64     fip_linkups;    /*  FIP link up                      */
        u64     fip_linkdns;    /*  FIP link down                    */
        u64     fip_fails;      /*  FIP failures                     */
        u64     mac_invalids;   /*  Invalid mac assignments          */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
        u64     vlan_timeouts;  /*  Vlan request timeouts            */
        u64     vlan_invalids;  /*  Vlan invalids                    */
        u64     disc_req;       /*  Discovery requests               */
-       u64     disc_rsp;       /*  Discovery responses      */
+       u64     disc_rsp;       /*  Discovery responses              */
        u64     disc_err;       /*  Discovery error frames           */
        u64     disc_unsol;     /*  Discovery unsolicited            */
        u64     disc_timeouts;  /*  Discovery timeouts               */
        u64     disc_fcf_unavail; /*  Discovery FCF not avail        */
-       u64     linksvc_unsupp; /*  FIP link service req unsupp.    */
-       u64     linksvc_err;    /*  FIP link service req errors     */
+       u64     linksvc_unsupp; /*  FIP link service req unsupp      */
+       u64     linksvc_err;    /*  FIP link service req errors      */
        u64     logo_req;       /*  FIP logos received               */
-       u64     clrvlink_req;   /*  Clear virtual link requests     */
+       u64     clrvlink_req;   /*  Clear virtual link requests      */
        u64     op_unsupp;      /*  FIP operation unsupp.            */
-       u64     untagged;       /*  FIP untagged frames      */
+       u64     untagged;       /*  FIP untagged frames              */
        u64     txf_ucast;      /*  Tx FCoE unicast frames           */
-       u64     txf_ucast_vlan; /*  Tx FCoE unicast vlan frames     */
+       u64     txf_ucast_vlan; /*  Tx FCoE unicast vlan frames      */
        u64     txf_ucast_octets; /*  Tx FCoE unicast octets         */
        u64     txf_mcast;      /*  Tx FCoE multicast frames         */
-       u64     txf_mcast_vlan; /*  Tx FCoE multicast vlan frames   */
+       u64     txf_mcast_vlan; /*  Tx FCoE multicast vlan frames    */
        u64     txf_mcast_octets; /*  Tx FCoE multicast octets       */
        u64     txf_bcast;      /*  Tx FCoE broadcast frames         */
-       u64     txf_bcast_vlan; /*  Tx FCoE broadcast vlan frames   */
+       u64     txf_bcast_vlan; /*  Tx FCoE broadcast vlan frames    */
        u64     txf_bcast_octets; /*  Tx FCoE broadcast octets       */
-       u64     txf_timeout;    /*  Tx timeouts              */
+       u64     txf_timeout;      /*  Tx timeouts                    */
        u64     txf_parity_errors; /*  Transmit parity err           */
-       u64     txf_fid_parity_errors; /*  Transmit FID parity err  */
+       u64     txf_fid_parity_errors; /*  Transmit FID parity err   */
        u64     rxf_ucast_octets; /*  Rx FCoE unicast octets         */
        u64     rxf_ucast;      /*  Rx FCoE unicast frames           */
-       u64     rxf_ucast_vlan; /*  Rx FCoE unicast vlan frames     */
+       u64     rxf_ucast_vlan; /*  Rx FCoE unicast vlan frames      */
        u64     rxf_mcast_octets; /*  Rx FCoE multicast octets       */
        u64     rxf_mcast;      /*  Rx FCoE multicast frames         */
-       u64     rxf_mcast_vlan; /*  Rx FCoE multicast vlan frames   */
+       u64     rxf_mcast_vlan; /*  Rx FCoE multicast vlan frames    */
        u64     rxf_bcast_octets; /*  Rx FCoE broadcast octets       */
        u64     rxf_bcast;      /*  Rx FCoE broadcast frames         */
-       u64     rxf_bcast_vlan; /*  Rx FCoE broadcast vlan frames   */
+       u64     rxf_bcast_vlan; /*  Rx FCoE broadcast vlan frames    */
 };
 
 /*
@@ -852,12 +853,12 @@ struct bfa_port_cfg_s {
        u8       tx_bbcredit;   /*  transmit buffer credits     */
        u8       ratelimit;     /*  ratelimit enabled or not    */
        u8       trl_def_speed; /*  ratelimit default speed     */
-       u8      bb_scn;         /*  BB_SCN value from FLOGI Exchg */
-       u8      bb_scn_state;   /*  Config state of BB_SCN */
-       u8      faa_state;      /*  FAA enabled/disabled        */
-       u8      rsvd[1];
-       u16 path_tov;   /*  device path timeout */
-       u16 q_depth;    /*  SCSI Queue depth            */
+       u8       bb_scn;        /*  BB_SCN value from FLOGI Exchg */
+       u8       bb_scn_state;  /*  Config state of BB_SCN */
+       u8       faa_state;     /*  FAA enabled/disabled        */
+       u8       rsvd[1];
+       u16      path_tov;      /*  device path timeout */
+       u16      q_depth;       /*  SCSI Queue depth            */
 };
 #pragma pack()
 
@@ -868,20 +869,21 @@ struct bfa_port_attr_s {
        /*
         * Static fields
         */
-       wwn_t      nwwn;                /*  node wwn */
-       wwn_t      pwwn;                /*  port wwn */
-       wwn_t      factorynwwn; /*  factory node wwn */
-       wwn_t      factorypwwn; /*  factory port wwn */
-       enum fc_cos     cos_supported;  /*  supported class of services */
-       u32     rsvd;
+       wwn_t                   nwwn;           /*  node wwn */
+       wwn_t                   pwwn;           /*  port wwn */
+       wwn_t                   factorynwwn;    /*  factory node wwn */
+       wwn_t                   factorypwwn;    /*  factory port wwn */
+       enum fc_cos             cos_supported;  /*  supported class of
+                                                *  services */
+       u32                     rsvd;
        struct fc_symname_s     port_symname;   /*  port symbolic name */
-       enum bfa_port_speed speed_supported; /*  supported speeds */
-       bfa_boolean_t   pbind_enabled;
+       enum bfa_port_speed     speed_supported; /* supported speeds */
+       bfa_boolean_t           pbind_enabled;
 
        /*
         * Configured values
         */
-       struct bfa_port_cfg_s pport_cfg;        /*  pport cfg */
+       struct bfa_port_cfg_s   pport_cfg;      /*  pport cfg */
 
        /*
         * Dynamic field - info from BFA
@@ -890,19 +892,20 @@ struct bfa_port_attr_s {
        enum bfa_port_speed     speed;          /*  current speed */
        enum bfa_port_topology  topology;       /*  current topology */
        bfa_boolean_t           beacon;         /*  current beacon status */
-       bfa_boolean_t           link_e2e_beacon; /*  link beacon is on */
-       bfa_boolean_t   bbsc_op_status; /* fc credit recovery oper state */
+       bfa_boolean_t           link_e2e_beacon; /* link beacon is on */
+       bfa_boolean_t           bbsc_op_status; /* fc credit recovery oper
+                                                * state */
 
        /*
         * Dynamic field - info from FCS
         */
-       u32             pid;            /*  port ID */
+       u32                     pid;            /*  port ID */
        enum bfa_port_type      port_type;      /*  current topology */
-       u32             loopback;       /*  external loopback */
-       u32             authfail;       /*  auth fail state */
+       u32                     loopback;       /*  external loopback */
+       u32                     authfail;       /*  auth fail state */
 
        /* FCoE specific  */
-       u16             fcoe_vlan;
+       u16                     fcoe_vlan;
        u8                      rsvd1[2];
 };
 
@@ -910,48 +913,48 @@ struct bfa_port_attr_s {
  *           Port FCP mappings.
  */
 struct bfa_port_fcpmap_s {
-       char            osdevname[256];
+       char    osdevname[256];
        u32     bus;
        u32     target;
        u32     oslun;
        u32     fcid;
-       wwn_t      nwwn;
-       wwn_t      pwwn;
+       wwn_t   nwwn;
+       wwn_t   pwwn;
        u64     fcplun;
-       char            luid[256];
+       char    luid[256];
 };
 
 /*
  *           Port RNID info.
  */
 struct bfa_port_rnid_s {
-       wwn_t        wwn;
+       wwn_t     wwn;
        u32       unittype;
        u32       portid;
        u32       attached_nodes_num;
        u16       ip_version;
        u16       udp_port;
-       u8         ipaddr[16];
+       u8        ipaddr[16];
        u16       rsvd;
        u16       topologydiscoveryflags;
 };
 
 #pragma pack(1)
 struct bfa_fcport_fcf_s {
-       wwn_t      name;           /*  FCF name          */
-       wwn_t      fabric_name;    /*  Fabric Name            */
-       u8              fipenabled;     /*  FIP enabled or not */
-       u8              fipfailed;      /*  FIP failed or not   */
-       u8              resv[2];
-       u8       pri;       /*  FCF priority         */
-       u8       version;       /*  FIP version used     */
-       u8       available;      /*  Available  for  login    */
-       u8       fka_disabled;   /*  FKA is disabled      */
-       u8       maxsz_verified; /*  FCoE max size verified   */
-       u8       fc_map[3];      /*  FC map                */
-       __be16   vlan;     /*  FCoE vlan tag/priority   */
-       u32     fka_adv_per;    /*  FIP  ka advert. period   */
-       mac_t      mac;     /*  FCF mac           */
+       wwn_t   name;           /*  FCF name               */
+       wwn_t   fabric_name;    /*  Fabric Name            */
+       u8      fipenabled;     /*  FIP enabled or not     */
+       u8      fipfailed;      /*  FIP failed or not      */
+       u8      resv[2];
+       u8      pri;            /*  FCF priority           */
+       u8      version;        /*  FIP version used       */
+       u8      available;      /*  Available for login    */
+       u8      fka_disabled;   /*  FKA is disabled        */
+       u8      maxsz_verified; /*  FCoE max size verified */
+       u8      fc_map[3];      /*  FC map                 */
+       __be16  vlan;           /*  FCoE vlan tag/priority */
+       u32     fka_adv_per;    /*  FIP  ka advert. period */
+       mac_t   mac;            /*  FCF mac                */
 };
 
 /*
@@ -981,7 +984,7 @@ struct bfa_port_link_s {
        u8       linkstate_rsn; /*  bfa_port_linkstate_rsn_t */
        u8       topology;      /*  P2P/LOOP bfa_port_topology */
        u8       speed;         /*  Link speed (1/2/4/8 G) */
-       u32     linkstate_opt;  /*  Linkstate optional data (debug) */
+       u32      linkstate_opt; /*  Linkstate optional data (debug) */
        u8       trunked;       /*  Trunked or not (1 or 0) */
        u8       resvd[3];
        struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
@@ -1035,7 +1038,7 @@ struct bfa_rport_hal_stats_s {
        u32        sm_fwc_del;      /*  fw create: delete events   */
        u32        sm_fwc_off;      /*  fw create: offline events  */
        u32        sm_fwc_hwf;      /*  fw create: IOC down        */
-       u32        sm_fwc_unexp;            /*  fw create: exception events*/
+       u32        sm_fwc_unexp;    /*  fw create: exception events*/
        u32        sm_on_off;       /*  online: offline events     */
        u32        sm_on_del;       /*  online: delete events      */
        u32        sm_on_hwf;       /*  online: IOC down events    */
@@ -1043,25 +1046,25 @@ struct bfa_rport_hal_stats_s {
        u32        sm_fwd_rsp;      /*  fw delete: fw responses    */
        u32        sm_fwd_del;      /*  fw delete: delete events   */
        u32        sm_fwd_hwf;      /*  fw delete: IOC down events */
-       u32        sm_fwd_unexp;            /*  fw delete: exception events*/
+       u32        sm_fwd_unexp;    /*  fw delete: exception events*/
        u32        sm_off_del;      /*  offline: delete events     */
        u32        sm_off_on;       /*  offline: online events     */
        u32        sm_off_hwf;      /*  offline: IOC down events   */
-       u32        sm_off_unexp;            /*  offline: exception events  */
-       u32        sm_del_fwrsp;            /*  delete: fw responses       */
+       u32        sm_off_unexp;    /*  offline: exception events  */
+       u32        sm_del_fwrsp;    /*  delete: fw responses       */
        u32        sm_del_hwf;      /*  delete: IOC down events    */
-       u32        sm_del_unexp;            /*  delete: exception events   */
-       u32        sm_delp_fwrsp;           /*  delete pend: fw responses  */
+       u32        sm_del_unexp;    /*  delete: exception events   */
+       u32        sm_delp_fwrsp;   /*  delete pend: fw responses  */
        u32        sm_delp_hwf;     /*  delete pend: IOC downs     */
-       u32        sm_delp_unexp;           /*  delete pend: exceptions    */
-       u32        sm_offp_fwrsp;           /*  off-pending: fw responses  */
+       u32        sm_delp_unexp;   /*  delete pend: exceptions    */
+       u32        sm_offp_fwrsp;   /*  off-pending: fw responses  */
        u32        sm_offp_del;     /*  off-pending: deletes       */
        u32        sm_offp_hwf;     /*  off-pending: IOC downs     */
-       u32        sm_offp_unexp;           /*  off-pending: exceptions    */
+       u32        sm_offp_unexp;   /*  off-pending: exceptions    */
        u32        sm_iocd_off;     /*  IOC down: offline events   */
        u32        sm_iocd_del;     /*  IOC down: delete events    */
        u32        sm_iocd_on;      /*  IOC down: online events    */
-       u32        sm_iocd_unexp;           /*  IOC down: exceptions       */
+       u32        sm_iocd_unexp;   /*  IOC down: exceptions       */
        u32        rsvd;
 };
 #pragma pack(1)
@@ -1069,9 +1072,9 @@ struct bfa_rport_hal_stats_s {
  *  Rport's QoS attributes
  */
 struct bfa_rport_qos_attr_s {
-       u8                      qos_priority;  /*  rport's QoS priority   */
-       u8                      rsvd[3];
-       u32            qos_flow_id;       /*  QoS flow Id        */
+       u8              qos_priority;   /*  rport's QoS priority   */
+       u8              rsvd[3];
+       u32             qos_flow_id;    /*  QoS flow Id  */
 };
 #pragma pack()
 
index 1ac5aecf25a68a14c2c98011cf1af357b60ead26..eca7ab78085bef2664d63d173e4a5510cdb61cfd 100644 (file)
@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
                         (xmtr_tech & SFP_XMTR_TECH_SA))
                        *media = BFA_SFP_MEDIA_SW;
                /* Check 10G Ethernet Compilance code */
-               else if (e10g.b & 0x10)
+               else if (e10g.r.e10g_sr)
                        *media = BFA_SFP_MEDIA_SW;
-               else if (e10g.b & 0x60)
+               else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
                        *media = BFA_SFP_MEDIA_LW;
-               else if (e10g.r.e10g_unall & 0x80)
+               else if (e10g.r.e10g_unall)
                        *media = BFA_SFP_MEDIA_UNKNOWN;
                else
                        bfa_trc(sfp, 0);
index caca9b7c83093206492543fd0011705a3674e89f..439c012be763646c48be2910d67a3acce50c3d48 100644 (file)
@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
                }
        }
 
-       /*
-        * Remove the pci_dev debugfs directory for the port */
+       /* Remove the pci_dev debugfs directory for the port */
        if (port->port_debugfs_root) {
                debugfs_remove(port->port_debugfs_root);
                port->port_debugfs_root = NULL;
index 23149b9e297c29ae82dff0330498f09423a0e557..48e46f5b77cca0dd26311d9b4f1cccca18226a69 100644 (file)
@@ -28,7 +28,6 @@
 
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
 
 static struct scsi_device_handler *get_device_handler(const char *name)
 {
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
        return found;
 }
 
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
-       struct scsi_device_handler *tmp, *found = NULL;
-
-       spin_lock(&list_lock);
-       list_for_each_entry(tmp, &scsi_dh_list, list) {
-               if (tmp->idx == idx) {
-                       found = tmp;
-                       break;
-               }
-       }
-       spin_unlock(&list_lock);
-       return found;
-}
-
 /*
  * device_handler_match_function - Match a device handler to a device
  * @sdev - SCSI device to be tested
@@ -83,23 +67,6 @@ device_handler_match_function(struct scsi_device *sdev)
        return found_dh;
 }
 
-/*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
-       int idx;
-
-       idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
-                                         SCSI_DEVINFO_DH);
-       return get_device_handler_by_idx(idx);
-}
-
 /*
  * device_handler_match - Attach a device handler to a device
  * @scsi_dh - The device handler to match against or NULL
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
        struct scsi_device_handler *found_dh;
 
        found_dh = device_handler_match_function(sdev);
-       if (!found_dh)
-               found_dh = device_handler_match_devlist(sdev);
 
        if (scsi_dh && found_dh != scsi_dh)
                found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
  */
 int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
 {
-       int i;
 
        if (get_device_handler(scsi_dh->name))
                return -EBUSY;
 
        spin_lock(&list_lock);
-       scsi_dh->idx = scsi_dh_list_idx++;
        list_add(&scsi_dh->list, &scsi_dh_list);
        spin_unlock(&list_lock);
 
-       for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-               scsi_dev_info_list_add_keyed(0,
-                                       scsi_dh->devlist[i].vendor,
-                                       scsi_dh->devlist[i].model,
-                                       NULL,
-                                       scsi_dh->idx,
-                                       SCSI_DEVINFO_DH);
-       }
-
        bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
        printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
 
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
  */
 int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 {
-       int i;
 
        if (!get_device_handler(scsi_dh->name))
                return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
        bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
                         scsi_dh_notifier_remove);
 
-       for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-               scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
-                                            scsi_dh->devlist[i].model,
-                                            SCSI_DEVINFO_DH);
-       }
-
        spin_lock(&list_lock);
        list_del(&scsi_dh->list);
        spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
 {
        int r;
 
-       r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
-       if (r)
-               return r;
-
        r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
 
        if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
        bus_for_each_dev(&scsi_bus_type, NULL, NULL,
                         scsi_dh_sysfs_attr_remove);
        bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
-       scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
 }
 
 module_init(scsi_dh_init);
index 591186cf1896abbd0031e4a2bd45682e2b6d5191..e1c8be06de9de2f50dd608f0ec27f7bf81617adb 100644 (file)
@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
        {NULL, NULL},
 };
 
+static bool clariion_match(struct scsi_device *sdev)
+{
+       int i;
+
+       if (scsi_device_tpgs(sdev))
+               return false;
+
+       for (i = 0; clariion_dev_list[i].vendor; i++) {
+               if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+                       strlen(clariion_dev_list[i].vendor)) &&
+                   !strncmp(sdev->model, clariion_dev_list[i].model,
+                       strlen(clariion_dev_list[i].model))) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int clariion_bus_attach(struct scsi_device *sdev);
 static void clariion_bus_detach(struct scsi_device *sdev);
 
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
        .activate       = clariion_activate,
        .prep_fn        = clariion_prep_fn,
        .set_params     = clariion_set_params,
+       .match          = clariion_match,
 };
 
 static int clariion_bus_attach(struct scsi_device *sdev)
index 0f86a18b157de47dc7dd8028ee727aa7d56a8b18..084062bb8ee9de341ded2d0bb2d1b4b2ea897ecb 100644 (file)
@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
        {NULL, NULL},
 };
 
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+       int i;
+
+       if (scsi_device_tpgs(sdev))
+               return false;
+
+       for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+               if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+                       strlen(hp_sw_dh_data_list[i].vendor)) &&
+                   !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+                       strlen(hp_sw_dh_data_list[i].model))) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int hp_sw_bus_attach(struct scsi_device *sdev);
 static void hp_sw_bus_detach(struct scsi_device *sdev);
 
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
        .detach         = hp_sw_bus_detach,
        .activate       = hp_sw_activate,
        .prep_fn        = hp_sw_prep_fn,
+       .match          = hp_sw_match,
 };
 
 static int hp_sw_bus_attach(struct scsi_device *sdev)
index 1d31279200639074b87aa075475d1e84f77bf2eb..841ebf4a6788fc3895d5ddd31b8a67f7ce49ab55 100644 (file)
@@ -820,6 +820,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
        {NULL, NULL},
 };
 
+static bool rdac_match(struct scsi_device *sdev)
+{
+       int i;
+
+       if (scsi_device_tpgs(sdev))
+               return false;
+
+       for (i = 0; rdac_dev_list[i].vendor; i++) {
+               if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+                       strlen(rdac_dev_list[i].vendor)) &&
+                   !strncmp(sdev->model, rdac_dev_list[i].model,
+                       strlen(rdac_dev_list[i].model))) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int rdac_bus_attach(struct scsi_device *sdev);
 static void rdac_bus_detach(struct scsi_device *sdev);
 
@@ -832,6 +850,7 @@ static struct scsi_device_handler rdac_dh = {
        .attach = rdac_bus_attach,
        .detach = rdac_bus_detach,
        .activate = rdac_activate,
+       .match = rdac_match,
 };
 
 static int rdac_bus_attach(struct scsi_device *sdev)
index 865d452542be923ef7c02bc0f4eea816dfc898b3..5140f5d0fd6be610f5038368fdec6627be4e9cea 100644 (file)
@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
+       0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
 };
 
 /* List of controllers which cannot even be soft reset */
 static u32 soft_unresettable_controller[] = {
+       0x40800E11, /* Smart Array 5i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
 
        if (h->msix_vector || h->msi_vector)
                rc = request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h);
+                               0, h->devname, h);
        else
                rc = request_irq(h->intr[h->intr_mode], intxhandler,
-                               IRQF_DISABLED, h->devname, h);
+                               IRQF_SHARED, h->devname, h);
        if (rc) {
                dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
                       h->intr[h->intr_mode], h->devname);
index bb4c8e0584e23b343392a2c9a692273daadf8d45..825f9307417a8dcdb4240763476ad41107c0d02b 100644 (file)
@@ -247,18 +247,6 @@ struct lpfc_stats {
        uint32_t fcpLocalErr;
 };
 
-enum sysfs_mbox_state {
-       SMBOX_IDLE,
-       SMBOX_WRITING,
-       SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
-       enum sysfs_mbox_state state;
-       size_t                offset;
-       struct lpfcMboxq *    mbox;
-};
-
 struct lpfc_hba;
 
 
@@ -783,8 +771,6 @@ struct lpfc_hba {
        uint64_t bg_apptag_err_cnt;
        uint64_t bg_reftag_err_cnt;
 
-       struct lpfc_sysfs_mbox sysfs_mbox;
-
        /* fastpath list. */
        spinlock_t scsi_buf_list_lock;
        struct list_head lpfc_scsi_buf_list;
index d0ebaeb7ef60e96857c18999a6048a39247126e2..f6697cb0e21607fe154753859582c719693225c7 100644 (file)
@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t if_type;
+       uint8_t sli_family;
        char fwrev[32];
+       int len;
 
        lpfc_decode_firmware_rev(phba, fwrev, 1);
-       return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+       if_type = phba->sli4_hba.pc_sli4_params.if_type;
+       sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+                              fwrev, phba->sli_rev);
+       else
+               len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+                              fwrev, phba->sli_rev, if_type, sli_family);
+
+       return len;
 }
 
 /**
@@ -487,6 +500,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
        return len;
 }
 
+/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba *phba = vport->phba;
+
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               return snprintf(buf, PAGE_SIZE, "fc\n");
+
+       if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+               if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+                       return snprintf(buf, PAGE_SIZE, "fcoe\n");
+               if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+                       return snprintf(buf, PAGE_SIZE, "fc\n");
+       }
+       return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
 /**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
  * the readyness after performing a firmware reset.
  *
  * Returns:
- * zero for success
+ * zero for success, -EPERM when port does not have privilage to perform the
+ * reset, -EIO when port timeout from recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful in making
+ * change or addition to return codes.
  **/
 int
 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 {
        struct completion online_compl;
        struct pci_dev *pdev = phba->pcidev;
+       uint32_t before_fc_flag;
+       uint32_t sriov_nr_virtfn;
        uint32_t reg_val;
-       int status = 0;
-       int rc;
+       int status = 0, rc = 0;
+       int job_posted = 1, sriov_err;
 
        if (!phba->cfg_enable_hba_reset)
                return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
             LPFC_SLI_INTF_IF_TYPE_2))
                return -EPERM;
 
+       /* Keep state if we need to restore back */
+       before_fc_flag = phba->pport->fc_flag;
+       sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
        /* Disable SR-IOV virtual functions if enabled */
        if (phba->cfg_sriov_nr_virtfn) {
                pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
        /* delay driver action following IF_TYPE_2 reset */
        rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
-       if (rc)
+       if (rc == -EPERM) {
+               /* no privilage for reset, restore if needed */
+               if (before_fc_flag & FC_OFFLINE_MODE)
+                       goto out;
+       } else if (rc == -EIO) {
+               /* reset failed, there is nothing more we can do */
                return rc;
+       }
+
+       /* keep the original port state */
+       if (before_fc_flag & FC_OFFLINE_MODE)
+               goto out;
 
        init_completion(&online_compl);
-       rc = lpfc_workq_post_event(phba, &status, &online_compl,
-                                  LPFC_EVT_ONLINE);
-       if (rc == 0)
-               return -ENOMEM;
+       job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+                                          LPFC_EVT_ONLINE);
+       if (!job_posted)
+               goto out;
 
        wait_for_completion(&online_compl);
 
-       if (status != 0)
-               return -EIO;
+out:
+       /* in any case, restore the virtual functions enabled as before */
+       if (sriov_nr_virtfn) {
+               sriov_err =
+                       lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+               if (!sriov_err)
+                       phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+       }
 
-       return 0;
+       /* return proper error code */
+       if (!rc) {
+               if (!job_posted)
+                       rc = -ENOMEM;
+               else if (status)
+                       rc = -EIO;
+       }
+       return rc;
 }
 
 /**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct completion online_compl;
-       int status=0;
+       char *board_mode_str = NULL;
+       int status = 0;
        int rc;
 
-       if (!phba->cfg_enable_hba_reset)
-               return -EACCES;
+       if (!phba->cfg_enable_hba_reset) {
+               status = -EACCES;
+               goto board_mode_out;
+       }
 
        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-               "3050 lpfc_board_mode set to %s\n", buf);
+                        "3050 lpfc_board_mode set to %s\n", buf);
 
        init_completion(&online_compl);
 
        if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
                rc = lpfc_workq_post_event(phba, &status, &online_compl,
                                      LPFC_EVT_ONLINE);
-               if (rc == 0)
-                       return -ENOMEM;
+               if (rc == 0) {
+                       status = -ENOMEM;
+                       goto board_mode_out;
+               }
                wait_for_completion(&online_compl);
        } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
                status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
        else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
                if (phba->sli_rev == LPFC_SLI_REV4)
-                       return -EINVAL;
+                       status = -EINVAL;
                else
                        status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
        else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
                if (phba->sli_rev == LPFC_SLI_REV4)
-                       return -EINVAL;
+                       status = -EINVAL;
                else
                        status = lpfc_do_offline(phba, LPFC_EVT_KILL);
        else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
        else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
                status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
        else
-               return -EINVAL;
+               status = -EINVAL;
 
+board_mode_out:
        if (!status)
                return strlen(buf);
-       else
+       else {
+               board_mode_str = strchr(buf, '\n');
+               if (board_mode_str)
+                       *board_mode_str = '\0';
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                "3097 Failed \"%s\", status(%d), "
+                                "fc_flag(x%x)\n",
+                                buf, status, phba->pport->fc_flag);
                return status;
+       }
 }
 
 /**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
                   lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
        if (val >= 0 && val <= 6) {
                prev_val = phba->cfg_topology;
                phba->cfg_topology = val;
+               if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+                       val == 4) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                               "3113 Loop mode not supported at speed %d\n",
+                               phba->cfg_link_speed);
+                       phba->cfg_topology = prev_val;
+                       return -EINVAL;
+               }
                if (nolip)
                        return strlen(buf);
 
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
                                val);
                return -EINVAL;
        }
+       if (val == LPFC_USER_LINK_SPEED_16G &&
+                phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3112 lpfc_link_speed attribute cannot be set "
+                               "to %d. Speed is not supported in loop mode.\n",
+                               val);
+               return -EINVAL;
+       }
        if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
            (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
                prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
 static int
 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 {
+       if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3111 lpfc_link_speed of %d cannot "
+                       "support loop mode, setting topology to default.\n",
+                        val);
+               phba->cfg_topology = 0;
+       }
        if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
            (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
                phba->cfg_link_speed = val;
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_fips_rev,
        &dev_attr_lpfc_dss,
        &dev_attr_lpfc_sriov_hw_max_virtfn,
+       &dev_attr_protocol,
        NULL,
 };
 
@@ -3987,23 +4101,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
        .write = sysfs_ctlreg_write,
 };
 
-/**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
-       phba->sysfs_mbox.state = SMBOX_IDLE;
-       phba->sysfs_mbox.offset = 0;
-
-       if (phba->sysfs_mbox.mbox) {
-               mempool_free(phba->sysfs_mbox.mbox,
-                            phba->mbox_mem_pool);
-               phba->sysfs_mbox.mbox = NULL;
-       }
-}
-
 /**
  * sysfs_mbox_write - Write method for writing information via mbox
  * @filp: open sysfs file
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
  **/
 static ssize_t
 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
                 struct bin_attribute *bin_attr,
                 char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct Scsi_Host  *shost = class_to_shost(dev);
-       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-       struct lpfc_hba   *phba = vport->phba;
-       struct lpfcMboxq  *mbox = NULL;
-
-       if ((count + off) > MAILBOX_CMD_SIZE)
-               return -ERANGE;
-
-       if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
-               return -EINVAL;
-
-       if (count == 0)
-               return 0;
-
-       if (off == 0) {
-               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-               if (!mbox)
-                       return -ENOMEM;
-               memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
-       }
-
-       spin_lock_irq(&phba->hbalock);
-
-       if (off == 0) {
-               if (phba->sysfs_mbox.mbox)
-                       mempool_free(mbox, phba->mbox_mem_pool);
-               else
-                       phba->sysfs_mbox.mbox = mbox;
-               phba->sysfs_mbox.state = SMBOX_WRITING;
-       } else {
-               if (phba->sysfs_mbox.state  != SMBOX_WRITING ||
-                   phba->sysfs_mbox.offset != off           ||
-                   phba->sysfs_mbox.mbox   == NULL) {
-                       sysfs_mbox_idle(phba);
-                       spin_unlock_irq(&phba->hbalock);
-                       return -EAGAIN;
-               }
-       }
-
-       memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
-              buf, count);
-
-       phba->sysfs_mbox.offset = off + count;
-
-       spin_unlock_irq(&phba->hbalock);
-
-       return count;
+       return -EPERM;
 }
 
 /**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
  **/
 static ssize_t
 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct Scsi_Host  *shost = class_to_shost(dev);
-       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-       struct lpfc_hba   *phba = vport->phba;
-       LPFC_MBOXQ_t *mboxq;
-       MAILBOX_t *pmb;
-       uint32_t mbox_tmo;
-       int rc;
-
-       if (off > MAILBOX_CMD_SIZE)
-               return -ERANGE;
-
-       if ((count + off) > MAILBOX_CMD_SIZE)
-               count = MAILBOX_CMD_SIZE - off;
-
-       if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
-               return -EINVAL;
-
-       if (off && count == 0)
-               return 0;
-
-       spin_lock_irq(&phba->hbalock);
-
-       if (phba->over_temp_state == HBA_OVER_TEMP) {
-               sysfs_mbox_idle(phba);
-               spin_unlock_irq(&phba->hbalock);
-               return  -EACCES;
-       }
-
-       if (off == 0 &&
-           phba->sysfs_mbox.state  == SMBOX_WRITING &&
-           phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-               mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
-               pmb = &mboxq->u.mb;
-               switch (pmb->mbxCommand) {
-                       /* Offline only */
-               case MBX_INIT_LINK:
-               case MBX_DOWN_LINK:
-               case MBX_CONFIG_LINK:
-               case MBX_CONFIG_RING:
-               case MBX_RESET_RING:
-               case MBX_UNREG_LOGIN:
-               case MBX_CLEAR_LA:
-               case MBX_DUMP_CONTEXT:
-               case MBX_RUN_DIAGS:
-               case MBX_RESTART:
-               case MBX_SET_MASK:
-               case MBX_SET_DEBUG:
-                       if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
-                               printk(KERN_WARNING "mbox_read:Command 0x%x "
-                                      "is illegal in on-line state\n",
-                                      pmb->mbxCommand);
-                               sysfs_mbox_idle(phba);
-                               spin_unlock_irq(&phba->hbalock);
-                               return -EPERM;
-                       }
-               case MBX_WRITE_NV:
-               case MBX_WRITE_VPARMS:
-               case MBX_LOAD_SM:
-               case MBX_READ_NV:
-               case MBX_READ_CONFIG:
-               case MBX_READ_RCONFIG:
-               case MBX_READ_STATUS:
-               case MBX_READ_XRI:
-               case MBX_READ_REV:
-               case MBX_READ_LNK_STAT:
-               case MBX_DUMP_MEMORY:
-               case MBX_DOWN_LOAD:
-               case MBX_UPDATE_CFG:
-               case MBX_KILL_BOARD:
-               case MBX_LOAD_AREA:
-               case MBX_LOAD_EXP_ROM:
-               case MBX_BEACON:
-               case MBX_DEL_LD_ENTRY:
-               case MBX_SET_VARIABLE:
-               case MBX_WRITE_WWN:
-               case MBX_PORT_CAPABILITIES:
-               case MBX_PORT_IOV_CONTROL:
-                       break;
-               case MBX_SECURITY_MGMT:
-               case MBX_AUTH_PORT:
-                       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
-                               printk(KERN_WARNING "mbox_read:Command 0x%x "
-                                      "is not permitted\n", pmb->mbxCommand);
-                               sysfs_mbox_idle(phba);
-                               spin_unlock_irq(&phba->hbalock);
-                               return -EPERM;
-                       }
-                       break;
-               case MBX_READ_SPARM64:
-               case MBX_READ_TOPOLOGY:
-               case MBX_REG_LOGIN:
-               case MBX_REG_LOGIN64:
-               case MBX_CONFIG_PORT:
-               case MBX_RUN_BIU_DIAG:
-                       printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-                              pmb->mbxCommand);
-                       sysfs_mbox_idle(phba);
-                       spin_unlock_irq(&phba->hbalock);
-                       return -EPERM;
-               default:
-                       printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-                              pmb->mbxCommand);
-                       sysfs_mbox_idle(phba);
-                       spin_unlock_irq(&phba->hbalock);
-                       return -EPERM;
-               }
-
-               /* If HBA encountered an error attention, allow only DUMP
-                * or RESTART mailbox commands until the HBA is restarted.
-                */
-               if (phba->pport->stopped &&
-                   pmb->mbxCommand != MBX_DUMP_MEMORY &&
-                   pmb->mbxCommand != MBX_RESTART &&
-                   pmb->mbxCommand != MBX_WRITE_VPARMS &&
-                   pmb->mbxCommand != MBX_WRITE_WWN)
-                       lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-                                       "1259 mbox: Issued mailbox cmd "
-                                       "0x%x while in stopped state.\n",
-                                       pmb->mbxCommand);
-
-               phba->sysfs_mbox.mbox->vport = vport;
-
-               /* Don't allow mailbox commands to be sent when blocked
-                * or when in the middle of discovery
-                */
-               if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
-                       sysfs_mbox_idle(phba);
-                       spin_unlock_irq(&phba->hbalock);
-                       return  -EAGAIN;
-               }
-
-               if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-                   (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
-                       spin_unlock_irq(&phba->hbalock);
-                       rc = lpfc_sli_issue_mbox (phba,
-                                                 phba->sysfs_mbox.mbox,
-                                                 MBX_POLL);
-                       spin_lock_irq(&phba->hbalock);
-
-               } else {
-                       spin_unlock_irq(&phba->hbalock);
-                       mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
-                       rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-                       spin_lock_irq(&phba->hbalock);
-               }
-
-               if (rc != MBX_SUCCESS) {
-                       if (rc == MBX_TIMEOUT) {
-                               phba->sysfs_mbox.mbox = NULL;
-                       }
-                       sysfs_mbox_idle(phba);
-                       spin_unlock_irq(&phba->hbalock);
-                       return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
-               }
-               phba->sysfs_mbox.state = SMBOX_READING;
-       }
-       else if (phba->sysfs_mbox.offset != off ||
-                phba->sysfs_mbox.state  != SMBOX_READING) {
-               printk(KERN_WARNING  "mbox_read: Bad State\n");
-               sysfs_mbox_idle(phba);
-               spin_unlock_irq(&phba->hbalock);
-               return -EAGAIN;
-       }
-
-       memcpy(buf, (uint8_t *) &pmb + off, count);
-
-       phba->sysfs_mbox.offset = off + count;
-
-       if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
-               sysfs_mbox_idle(phba);
-
-       spin_unlock_irq(&phba->hbalock);
-
-       return count;
+       return -EPERM;
 }
 
 static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
                case LPFC_LINK_UP:
                case LPFC_CLEAR_LA:
                case LPFC_HBA_READY:
-                       /* Links up, beyond this port_type reports state */
-                       fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+                       /* Links up, reports port state accordingly */
+                       if (vport->port_state < LPFC_VPORT_READY)
+                               fc_host_port_state(shost) =
+                                                       FC_PORTSTATE_BYPASSED;
+                       else
+                               fc_host_port_state(shost) =
+                                                       FC_PORTSTATE_ONLINE;
                        break;
                case LPFC_HBA_ERROR:
                        fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
index 6760c69f525381f2d4c89be25ef7b4c5d5bc36ed..56a86baece5b035cbbb0116f3a790b13a896fc84 100644 (file)
@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                } else {
                                        switch (cmd) {
                                        case ELX_LOOPBACK_DATA:
-                                               diag_cmd_data_free(phba,
-                                               (struct lpfc_dmabufext *)
-                                                       dmabuf);
+                                               if (phba->sli_rev <
+                                                   LPFC_SLI_REV4)
+                                                       diag_cmd_data_free(phba,
+                                                       (struct lpfc_dmabufext
+                                                        *)dmabuf);
                                                break;
                                        case ELX_LOOPBACK_XRI_SETUP:
                                                if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 error_ct_unsol_exit:
        if (!list_empty(&head))
                list_del(&head);
-       if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+       if ((phba->sli_rev < LPFC_SLI_REV4) &&
+           (evt_req_id == SLI_CT_ELX_LOOPBACK))
                return 0;
        return 1;
 }
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
        struct diag_mode_set *loopback_mode;
        uint32_t link_flags;
        uint32_t timeout;
-       LPFC_MBOXQ_t *pmboxq;
+       LPFC_MBOXQ_t *pmboxq  = NULL;
        int mbxstatus = MBX_SUCCESS;
        int i = 0;
        int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
                                rc = -ETIMEDOUT;
                                goto loopback_mode_exit;
                        }
-
                        msleep(10);
                }
 
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
                if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
                        rc = -ENODEV;
                else {
+                       spin_lock_irq(&phba->hbalock);
                        phba->link_flag |= LS_LOOPBACK_MODE;
+                       spin_unlock_irq(&phba->hbalock);
                        /* wait for the link attention interrupt */
                        msleep(100);
 
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
        /*
         * Let SLI layer release mboxq if mbox command completed after timeout.
         */
-       if (mbxstatus != MBX_TIMEOUT)
+       if (pmboxq && mbxstatus != MBX_TIMEOUT)
                mempool_free(pmboxq, phba->mbox_mem_pool);
 
 job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
                rc = -ENOMEM;
                goto link_diag_state_set_out;
        }
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+                       diag, phba->sli4_hba.lnk_info.lnk_tp,
+                       phba->sli4_hba.lnk_info.lnk_no);
+
        link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
        bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
-              phba->sli4_hba.link_state.number);
+              phba->sli4_hba.lnk_info.lnk_no);
        bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
-              phba->sli4_hba.link_state.type);
+              phba->sli4_hba.lnk_info.lnk_tp);
        if (diag)
                bf_set(lpfc_mbx_set_diag_state_diag,
                       &link_diag_state->u.req, 1);
@@ -1726,6 +1735,79 @@ link_diag_state_set_out:
        return rc;
 }
 
+/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *pmboxq;
+       uint32_t req_len, alloc_len;
+       struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+       int mbxstatus = MBX_SUCCESS, rc = 0;
+
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq)
+               return -ENOMEM;
+       req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+                  sizeof(struct lpfc_sli4_cfg_mhdr));
+       alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                               LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+                               req_len, LPFC_SLI4_MBX_EMBED);
+       if (alloc_len != req_len) {
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+               return -ENOMEM;
+       }
+       link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+       bf_set(lpfc_mbx_set_diag_state_link_num,
+              &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+       bf_set(lpfc_mbx_set_diag_state_link_type,
+              &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+       bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+              LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+       mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+       if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3127 Failed setup loopback mode mailbox "
+                               "command, rc:x%x, status:x%x\n", mbxstatus,
+                               pmboxq->u.mb.mbxStatus);
+               rc = -ENODEV;
+       }
+       if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function set up SLI4 FC port registrations for diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+       int rc;
+
+       if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3136 Port still had vfi registered: "
+                               "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+                               phba->pport->fc_myDID, phba->fcf.fcfi,
+                               phba->sli4_hba.vfi_ids[phba->pport->vfi],
+                               phba->vpi_ids[phba->pport->vpi]);
+               return -EINVAL;
+       }
+       rc = lpfc_issue_reg_vfi(phba->pport);
+       return rc;
+}
+
 /**
  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
  * @phba: Pointer to HBA context object.
@@ -1738,10 +1820,8 @@ static int
 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 {
        struct diag_mode_set *loopback_mode;
-       uint32_t link_flags, timeout, req_len, alloc_len;
-       struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
-       LPFC_MBOXQ_t *pmboxq = NULL;
-       int mbxstatus = MBX_SUCCESS, i, rc = 0;
+       uint32_t link_flags, timeout;
+       int i, rc = 0;
 
        /* no data to return just the return code */
        job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
        if (rc)
                goto job_error;
 
+       /* indicate we are in loopback diagnostic mode */
+       spin_lock_irq(&phba->hbalock);
+       phba->link_flag |= LS_LOOPBACK_MODE;
+       spin_unlock_irq(&phba->hbalock);
+
+       /* reset port to start from scratch */
+       rc = lpfc_selective_reset(phba);
+       if (rc)
+               goto job_error;
+
        /* bring the link to diagnostic mode */
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "3129 Bring link to diagnostic state.\n");
        loopback_mode = (struct diag_mode_set *)
                job->request->rqst_data.h_vendor.vendor_cmd;
        link_flags = loopback_mode->type;
        timeout = loopback_mode->timeout * 100;
 
        rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
-       if (rc)
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3130 Failed to bring link to diagnostic "
+                               "state, rc:x%x\n", rc);
                goto loopback_mode_exit;
+       }
 
        /* wait for link down before proceeding */
        i = 0;
        while (phba->link_state != LPFC_LINK_DOWN) {
                if (i++ > timeout) {
                        rc = -ETIMEDOUT;
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3131 Timeout waiting for link to "
+                                       "diagnostic mode, timeout:%d ms\n",
+                                       timeout * 10);
                        goto loopback_mode_exit;
                }
                msleep(10);
        }
+
        /* set up loopback mode */
-       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       if (!pmboxq) {
-               rc = -ENOMEM;
-               goto loopback_mode_exit;
-       }
-       req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
-                  sizeof(struct lpfc_sli4_cfg_mhdr));
-       alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
-                               LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
-                               req_len, LPFC_SLI4_MBX_EMBED);
-       if (alloc_len != req_len) {
-               rc = -ENOMEM;
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "3132 Set up loopback mode:x%x\n", link_flags);
+
+       if (link_flags == INTERNAL_LOOP_BACK)
+               rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+       else if (link_flags == EXTERNAL_LOOP_BACK)
+               rc = lpfc_hba_init_link_fc_topology(phba,
+                                                   FLAGS_TOPOLOGY_MODE_PT_PT,
+                                                   MBX_NOWAIT);
+       else {
+               rc = -EINVAL;
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "3141 Loopback mode:x%x not supported\n",
+                               link_flags);
                goto loopback_mode_exit;
        }
-       link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
-       bf_set(lpfc_mbx_set_diag_state_link_num,
-              &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
-       bf_set(lpfc_mbx_set_diag_state_link_type,
-              &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
-       if (link_flags == INTERNAL_LOOP_BACK)
-               bf_set(lpfc_mbx_set_diag_lpbk_type,
-                      &link_diag_loopback->u.req,
-                      LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
-       else
-               bf_set(lpfc_mbx_set_diag_lpbk_type,
-                      &link_diag_loopback->u.req,
-                      LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
 
-       mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
-       if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
-               rc = -ENODEV;
-       else {
-               phba->link_flag |= LS_LOOPBACK_MODE;
+       if (!rc) {
                /* wait for the link attention interrupt */
                msleep(100);
                i = 0;
+               while (phba->link_state < LPFC_LINK_UP) {
+                       if (i++ > timeout) {
+                               rc = -ETIMEDOUT;
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3137 Timeout waiting for link up "
+                                       "in loopback mode, timeout:%d ms\n",
+                                       timeout * 10);
+                               break;
+                       }
+                       msleep(10);
+               }
+       }
+
+       /* port resource registration setup for loopback diagnostic */
+       if (!rc) {
+               /* set up a non-zero myDID for loopback test */
+               phba->pport->fc_myDID = 1;
+               rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+       } else
+               goto loopback_mode_exit;
+
+       if (!rc) {
+               /* wait for the port ready */
+               msleep(100);
+               i = 0;
                while (phba->link_state != LPFC_HBA_READY) {
                        if (i++ > timeout) {
                                rc = -ETIMEDOUT;
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3133 Timeout waiting for port "
+                                       "loopback mode ready, timeout:%d ms\n",
+                                       timeout * 10);
                                break;
                        }
                        msleep(10);
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
        }
 
 loopback_mode_exit:
+       /* clear loopback diagnostic mode */
+       if (rc) {
+               spin_lock_irq(&phba->hbalock);
+               phba->link_flag &= ~LS_LOOPBACK_MODE;
+               spin_unlock_irq(&phba->hbalock);
+       }
        lpfc_bsg_diag_mode_exit(phba);
 
-       /*
-        * Let SLI layer release mboxq if mbox command completed after timeout.
-        */
-       if (pmboxq && (mbxstatus != MBX_TIMEOUT))
-               mempool_free(pmboxq, phba->mbox_mem_pool);
-
 job_error:
        /* make error code available to userspace */
        job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
                rc = -ENODEV;
 
        return rc;
-
 }
 
 /**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
        struct Scsi_Host *shost;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
-       int rc;
+       struct diag_mode_set *loopback_mode_end_cmd;
+       uint32_t timeout;
+       int rc, i;
 
        shost = job->shost;
        if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
            LPFC_SLI_INTF_IF_TYPE_2)
                return -ENODEV;
 
+       /* clear loopback diagnostic mode */
+       spin_lock_irq(&phba->hbalock);
+       phba->link_flag &= ~LS_LOOPBACK_MODE;
+       spin_unlock_irq(&phba->hbalock);
+       loopback_mode_end_cmd = (struct diag_mode_set *)
+                       job->request->rqst_data.h_vendor.vendor_cmd;
+       timeout = loopback_mode_end_cmd->timeout * 100;
+
        rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3139 Failed to bring link to diagnostic "
+                               "state, rc:x%x\n", rc);
+               goto loopback_mode_end_exit;
+       }
 
-       if (!rc)
-               rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+       /* wait for link down before proceeding */
+       i = 0;
+       while (phba->link_state != LPFC_LINK_DOWN) {
+               if (i++ > timeout) {
+                       rc = -ETIMEDOUT;
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3140 Timeout waiting for link to "
+                                       "diagnostic mode_end, timeout:%d ms\n",
+                                       timeout * 10);
+                       /* there is nothing much we can do here */
+                       break;
+               }
+               msleep(10);
+       }
+
+       /* reset port resource registrations */
+       rc = lpfc_selective_reset(phba);
+       phba->pport->fc_myDID = 0;
 
+loopback_mode_end_exit:
+       /* make return code available to userspace */
+       job->reply->result = rc;
+       /* complete the job back to userspace if no error */
+       if (rc == 0)
+               job->job_done(job);
        return rc;
 }
 
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
        }
        run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
        bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
-              phba->sli4_hba.link_state.number);
+              phba->sli4_hba.lnk_info.lnk_no);
        bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
-              phba->sli4_hba.link_state.type);
+              phba->sli4_hba.lnk_info.lnk_tp);
        bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
               link_diag_test_cmd->test_id);
        bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
        if (!mbox)
                return -ENOMEM;
 
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+                               (uint8_t *)&phba->pport->fc_sparam,
+                               mbox, *rpi);
+       else {
                *rpi = lpfc_sli4_alloc_rpi(phba);
-       status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
-                             (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+               status = lpfc_reg_rpi(phba, phba->pport->vpi,
+                               phba->pport->fc_myDID,
+                               (uint8_t *)&phba->pport->fc_sparam,
+                               mbox, *rpi);
+       }
+
        if (status) {
                mempool_free(mbox, phba->mbox_mem_pool);
                if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
                return -ENODEV;
        }
 
-       *rpi = mbox->u.mb.un.varWords[0];
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               *rpi = mbox->u.mb.un.varWords[0];
 
        lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
        kfree(dmabuff);
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
        if (mbox == NULL)
                return -ENOMEM;
 
-       lpfc_unreg_login(phba, 0, rpi, mbox);
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               lpfc_unreg_login(phba, 0, rpi, mbox);
+       else
+               lpfc_unreg_login(phba, phba->pport->vpi,
+                                phba->sli4_hba.rpi_ids[rpi], mbox);
+
        status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 
        if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        uint32_t full_size;
        size_t segment_len = 0, segment_offset = 0, current_offset = 0;
        uint16_t rpi = 0;
-       struct lpfc_iocbq *cmdiocbq, *rspiocbq;
-       IOCB_t *cmd, *rsp;
+       struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+       IOCB_t *cmd, *rsp = NULL;
        struct lpfc_sli_ct_request *ctreq;
        struct lpfc_dmabuf *txbmp;
        struct ulp_bde64 *txbpl = NULL;
        struct lpfc_dmabufext *txbuffer = NULL;
        struct list_head head;
        struct lpfc_dmabuf  *curr;
-       uint16_t txxri, rxxri;
+       uint16_t txxri = 0, rxxri;
        uint32_t num_bde;
        uint8_t *ptr = NULL, *rx_databuf = NULL;
        int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
                rc = -EINVAL;
                goto loopback_test_exit;
        }
-
        diag_mode = (struct diag_mode_test *)
                job->request->rqst_data.h_vendor.vendor_cmd;
 
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        if (rc)
                goto loopback_test_exit;
 
-       rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
-       if (rc) {
-               lpfcdiag_loop_self_unreg(phba, rpi);
-               goto loopback_test_exit;
-       }
+       if (phba->sli_rev < LPFC_SLI_REV4) {
+               rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+               if (rc) {
+                       lpfcdiag_loop_self_unreg(phba, rpi);
+                       goto loopback_test_exit;
+               }
 
-       rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
-       if (rc) {
-               lpfcdiag_loop_self_unreg(phba, rpi);
-               goto loopback_test_exit;
+               rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+               if (rc) {
+                       lpfcdiag_loop_self_unreg(phba, rpi);
+                       goto loopback_test_exit;
+               }
        }
-
        evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
                                SLI_CT_ELX_LOOPBACK);
        if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
        cmdiocbq = lpfc_sli_get_iocbq(phba);
-       rspiocbq = lpfc_sli_get_iocbq(phba);
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               rspiocbq = lpfc_sli_get_iocbq(phba);
        txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 
        if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
                }
        }
 
-       if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
-               !txbmp->virt) {
+       if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+               rc = -ENOMEM;
+               goto err_loopback_test_exit;
+       }
+       if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
                rc = -ENOMEM;
                goto err_loopback_test_exit;
        }
 
        cmd = &cmdiocbq->iocb;
-       rsp = &rspiocbq->iocb;
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               rsp = &rspiocbq->iocb;
 
        INIT_LIST_HEAD(&head);
        list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        list_del(&head);
 
        /* Build the XMIT_SEQUENCE iocb */
-
        num_bde = (uint32_t)txbuffer->flag;
 
        cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        cmd->ulpBdeCount = 1;
        cmd->ulpLe = 1;
        cmd->ulpClass = CLASS3;
-       cmd->ulpContext = txxri;
 
+       if (phba->sli_rev < LPFC_SLI_REV4) {
+               cmd->ulpContext = txxri;
+       } else {
+               cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+               cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+               cmdiocbq->context3 = txbmp;
+               cmdiocbq->sli4_xritag = NO_XRI;
+               cmd->unsli3.rcvsli3.ox_id = 0xffff;
+       }
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->vport = phba->pport;
-
        iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                             rspiocbq, (phba->fc_ratov * 2) +
                                             LPFC_DRVR_TIMEOUT);
 
-       if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+       if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+                                          (rsp->ulpStatus != IOCB_SUCCESS))) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "3126 Failed loopback test issue iocb: "
+                               "iocb_stat:x%x\n", iocb_stat);
                rc = -EIO;
                goto err_loopback_test_exit;
        }
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
                evt->wq, !list_empty(&evt->events_to_see),
                ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
        evt->waiting = 0;
-       if (list_empty(&evt->events_to_see))
+       if (list_empty(&evt->events_to_see)) {
                rc = (time_left) ? -EINTR : -ETIMEDOUT;
-       else {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "3125 Not receiving unsolicited event, "
+                               "rc:x%x\n", rc);
+       } else {
                spin_lock_irqsave(&phba->ct_ev_lock, flags);
                list_move(evt->events_to_see.prev, &evt->events_to_get);
                evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
        job->reply->result = rc;
        job->dd_data = NULL;
        /* complete the job back to userspace if no error */
-       if (rc == 0)
+       if (rc == IOCB_SUCCESS)
                job->job_done(job);
        return rc;
 }
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
                        && (mb->un.varWords[1] == 1)) {
                        phba->wait_4_mlo_maint_flg = 1;
                } else if (mb->un.varWords[0] == SETVAR_MLORST) {
+                       spin_lock_irq(&phba->hbalock);
                        phba->link_flag &= ~LS_LOOPBACK_MODE;
+                       spin_unlock_irq(&phba->hbalock);
                        phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
                }
                break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
        unsigned long flags;
        uint32_t size;
        int rc = 0;
+       struct lpfc_dmabuf *dmabuf;
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       uint8_t *pmbx;
 
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
         */
        pmb = (uint8_t *)&pmboxq->u.mb;
        pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+       /* Copy the byte swapped response mailbox back to the user */
        memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+       /* if there is any non-embedded extended data copy that too */
+       dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+       if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+           &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+               pmbx = (uint8_t *)dmabuf->virt;
+               /* byte swap the extended data following the mailbox command */
+               lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+                       &pmbx[sizeof(MAILBOX_t)],
+                       sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+       }
 
        job = dd_data->context_un.mbox.set_job;
        if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
        /* state change */
        phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 
+       /*
+        * Non-embedded mailbox subcommand data gets byte swapped here because
+        * the lower level driver code only does the first 64 mailbox words.
+        */
+       if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+           &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+               (nemb_tp == nemb_mse))
+               lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+                       &pmbx[sizeof(MAILBOX_t)],
+                               sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[0].buf_len);
+
        rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
        if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
                        &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
                if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
-                                       "2953 Handled SLI_CONFIG(mse) wr, "
+                                       "2953 Failed SLI_CONFIG(mse) wr, "
                                        "ext_buf_cnt(%d) out of range(%d)\n",
                                        ext_buf_cnt,
                                        LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
                ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
                if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
-                                       "2954 Handled SLI_CONFIG(hbd) wr, "
+                                       "2954 Failed SLI_CONFIG(hbd) wr, "
                                        "ext_buf_cnt(%d) out of range(%d)\n",
                                        ext_buf_cnt,
                                        LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
                                "2956 Failed to issue SLI_CONFIG ext-buffer "
                                "maibox command, rc:x%x\n", rc);
                rc = -EPIPE;
+               goto job_error;
        }
 
        /* wait for additoinal external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
        uint32_t opcode;
        int rc = SLI_CONFIG_NOT_HANDLED;
 
-       /* state change */
+       /* state change on new multi-buffer pass-through mailbox command */
        phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
 
        sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                                break;
                        default:
                                lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-                                               "2959 Not handled SLI_CONFIG "
+                                               "2959 Reject SLI_CONFIG "
                                                "subsys_fcoe, opcode:x%x\n",
                                                opcode);
-                               rc = SLI_CONFIG_NOT_HANDLED;
+                               rc = -EPERM;
+                               break;
+                       }
+               } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+                       switch (opcode) {
+                       case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "3106 Handled SLI_CONFIG "
+                                               "subsys_fcoe, opcode:x%x\n",
+                                               opcode);
+                               rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+                                                       nemb_mse, dmabuf);
+                               break;
+                       default:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "3107 Reject SLI_CONFIG "
+                                               "subsys_fcoe, opcode:x%x\n",
+                                               opcode);
+                               rc = -EPERM;
                                break;
                        }
                } else {
                        lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-                                       "2977 Handled SLI_CONFIG "
+                                       "2977 Reject SLI_CONFIG "
                                        "subsys:x%d, opcode:x%x\n",
                                        subsys, opcode);
-                       rc = SLI_CONFIG_NOT_HANDLED;
+                       rc = -EPERM;
                }
        } else {
                subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                        }
                } else {
                        lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-                                       "2978 Handled SLI_CONFIG "
+                                       "2978 Not handled SLI_CONFIG "
                                        "subsys:x%d, opcode:x%x\n",
                                        subsys, opcode);
                        rc = SLI_CONFIG_NOT_HANDLED;
                }
        }
+
+       /* state reset on not handled new multi-buffer mailbox command */
+       if (rc != SLI_CONFIG_HANDLED)
+               phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
        return rc;
 }
 
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
        /* extended mailbox commands will need an extended buffer */
        if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
-               /* any data for the device? */
-               if (mbox_req->inExtWLen) {
-                       from = pmbx;
-                       ext = from + sizeof(MAILBOX_t);
-               }
+               from = pmbx;
+               ext = from + sizeof(MAILBOX_t);
                pmboxq->context2 = ext;
                pmboxq->in_ext_byte_len =
                        mbox_req->inExtWLen * sizeof(uint32_t);
index c8c2b47ea886f14cb992ec486108a8fafea2adf9..edfe61fc52b18517f222b020cdfe30d10c61667a 100644 (file)
@@ -96,7 +96,7 @@ struct get_mgmt_rev {
 };
 
 #define MANAGEMENT_MAJOR_REV   1
-#define MANAGEMENT_MINOR_REV   0
+#define MANAGEMENT_MINOR_REV   1
 
 /* the MgmtRevInfo structure */
 struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
 #define COMN_OPCODE_WRITE_OBJECT       0xAC
 #define COMN_OPCODE_READ_OBJECT_LIST   0xAD
 #define COMN_OPCODE_DELETE_OBJECT      0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES   0x79
        uint32_t timeout;
        uint32_t request_length;
        uint32_t word9;
index 75e2e569dedefb343e585333559d8e577176d466..c88e556ea62e54769bfdf5850239e262838cff0a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
 static inline void
 lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
 {
-       __iowrite32_copy(dest, src, bytes);
+       /* convert bytes in argument list to word count for copy function */
+       __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
 }
 
 static inline void
index 60f95347babff7ae907753259a067f0909607b5c..26924b7a6cdebf1feca844dbe9233be6cc51c63c 100644 (file)
@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
 void lpfc_disc_timeout(unsigned long);
 
 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 void lpfc_worker_wake_up(struct lpfc_hba *);
 int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
 int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
 uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
 int lpfc_sli4_queue_create(struct lpfc_hba *);
 void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+                               struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);
index 28382596fb9a929545ae6bb9e4984b5d385aa274..3587a3fe8fcb62bb627e0f3dfac15ef45bc1df7e 100644 (file)
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
        /* Get slow-path event queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path EQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.sp_eq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tEQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.sp_eq->entry_size,
                        phba->sli4_hba.sp_eq->host_index,
                        phba->sli4_hba.sp_eq->hba_index);
+       }
 
        /* Get fast-path event queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Fast-path EQ information:\n");
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.fp_eq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++) {
+                       if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "\tEQID[%02d], "
                                "QE-COUNT[%04d], QE-SIZE[%04d], "
                                "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
                                phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
                                phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+                       }
+               }
        }
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
        /* Get mailbox complete queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path MBX CQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.mbx_cq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Associated EQID[%02d]:\n",
                        phba->sli4_hba.mbx_cq->assoc_qid);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tCQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.mbx_cq->entry_size,
                        phba->sli4_hba.mbx_cq->host_index,
                        phba->sli4_hba.mbx_cq->hba_index);
+       }
 
        /* Get slow-path complete queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path ELS CQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.els_cq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Associated EQID[%02d]:\n",
                        phba->sli4_hba.els_cq->assoc_qid);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tCQID [%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.els_cq->entry_size,
                        phba->sli4_hba.els_cq->host_index,
                        phba->sli4_hba.els_cq->hba_index);
+       }
 
        /* Get fast-path complete queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Fast-path FCP CQ information:\n");
        fcp_qidx = 0;
-       do {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.fcp_cq) {
+               do {
+                       if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "Associated EQID[%02d]:\n",
                                phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "\tCQID[%02d], "
                                "QE-COUNT[%04d], QE-SIZE[%04d], "
                                "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-       } while (++fcp_qidx < phba->cfg_fcp_eq_count);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       }
+               } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+               len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+       }
 
        /* Get mailbox queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path MBX MQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.mbx_wq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Associated CQID[%02d]:\n",
                        phba->sli4_hba.mbx_wq->assoc_qid);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tWQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.mbx_wq->entry_size,
                        phba->sli4_hba.mbx_wq->host_index,
                        phba->sli4_hba.mbx_wq->hba_index);
+       }
 
        /* Get slow-path work queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path ELS WQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.els_wq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Associated CQID[%02d]:\n",
                        phba->sli4_hba.els_wq->assoc_qid);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tWQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.els_wq->entry_size,
                        phba->sli4_hba.els_wq->host_index,
                        phba->sli4_hba.els_wq->hba_index);
+       }
 
        /* Get fast-path work queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Fast-path FCP WQ information:\n");
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.fcp_wq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+                    fcp_qidx++) {
+                       if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+                               continue;
+                       len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "Associated CQID[%02d]:\n",
                                phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                       len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "\tWQID[%02d], "
                                "QE-COUNT[%04d], WQE-SIZE[%04d], "
                                "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
                                phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
                                phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+               }
+               len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
        }
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
        /* Get receive queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Slow-path RQ information:\n");
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+       if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Associated CQID[%02d]:\n",
                        phba->sli4_hba.hdr_rq->assoc_qid);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tHQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.hdr_rq->entry_size,
                        phba->sli4_hba.hdr_rq->host_index,
                        phba->sli4_hba.hdr_rq->hba_index);
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "\tDQID[%02d], "
                        "QE-COUNT[%04d], QE-SIZE[%04d], "
                        "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                        phba->sli4_hba.dat_rq->entry_size,
                        phba->sli4_hba.dat_rq->host_index,
                        phba->sli4_hba.dat_rq->hba_index);
-
+       }
        return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
        switch (quetp) {
        case LPFC_IDIAG_EQ:
                /* Slow-path event queue */
-               if (phba->sli4_hba.sp_eq->queue_id == queid) {
+               if (phba->sli4_hba.sp_eq &&
+                   phba->sli4_hba.sp_eq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* Fast-path event queue */
-               for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-                       if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
-                               /* Sanity check */
-                               rc = lpfc_idiag_que_param_check(
+               if (phba->sli4_hba.fp_eq) {
+                       for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+                               if (phba->sli4_hba.fp_eq[qidx] &&
+                                   phba->sli4_hba.fp_eq[qidx]->queue_id ==
+                                   queid) {
+                                       /* Sanity check */
+                                       rc = lpfc_idiag_que_param_check(
                                                phba->sli4_hba.fp_eq[qidx],
                                                index, count);
-                               if (rc)
-                                       goto error_out;
-                               idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
-                               goto pass_check;
+                                       if (rc)
+                                               goto error_out;
+                                       idiag.ptr_private =
+                                               phba->sli4_hba.fp_eq[qidx];
+                                       goto pass_check;
+                               }
                        }
                }
                goto error_out;
                break;
        case LPFC_IDIAG_CQ:
                /* MBX complete queue */
-               if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+               if (phba->sli4_hba.mbx_cq &&
+                   phba->sli4_hba.mbx_cq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* ELS complete queue */
-               if (phba->sli4_hba.els_cq->queue_id == queid) {
+               if (phba->sli4_hba.els_cq &&
+                   phba->sli4_hba.els_cq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* FCP complete queue */
-               qidx = 0;
-               do {
-                       if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
-                               /* Sanity check */
-                               rc = lpfc_idiag_que_param_check(
+               if (phba->sli4_hba.fcp_cq) {
+                       qidx = 0;
+                       do {
+                               if (phba->sli4_hba.fcp_cq[qidx] &&
+                                   phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+                                   queid) {
+                                       /* Sanity check */
+                                       rc = lpfc_idiag_que_param_check(
                                                phba->sli4_hba.fcp_cq[qidx],
                                                index, count);
-                               if (rc)
-                                       goto error_out;
-                               idiag.ptr_private =
+                                       if (rc)
+                                               goto error_out;
+                                       idiag.ptr_private =
                                                phba->sli4_hba.fcp_cq[qidx];
-                               goto pass_check;
-                       }
-               } while (++qidx < phba->cfg_fcp_eq_count);
+                                       goto pass_check;
+                               }
+                       } while (++qidx < phba->cfg_fcp_eq_count);
+               }
                goto error_out;
                break;
        case LPFC_IDIAG_MQ:
                /* MBX work queue */
-               if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+               if (phba->sli4_hba.mbx_wq &&
+                   phba->sli4_hba.mbx_wq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        idiag.ptr_private = phba->sli4_hba.mbx_wq;
                        goto pass_check;
                }
+               goto error_out;
                break;
        case LPFC_IDIAG_WQ:
                /* ELS work queue */
-               if (phba->sli4_hba.els_wq->queue_id == queid) {
+               if (phba->sli4_hba.els_wq &&
+                   phba->sli4_hba.els_wq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* FCP work queue */
-               for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
-                       if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
-                               /* Sanity check */
-                               rc = lpfc_idiag_que_param_check(
+               if (phba->sli4_hba.fcp_wq) {
+                       for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+                               if (!phba->sli4_hba.fcp_wq[qidx])
+                                       continue;
+                               if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+                                   queid) {
+                                       /* Sanity check */
+                                       rc = lpfc_idiag_que_param_check(
                                                phba->sli4_hba.fcp_wq[qidx],
                                                index, count);
-                               if (rc)
-                                       goto error_out;
-                               idiag.ptr_private =
-                                       phba->sli4_hba.fcp_wq[qidx];
-                               goto pass_check;
+                                       if (rc)
+                                               goto error_out;
+                                       idiag.ptr_private =
+                                               phba->sli4_hba.fcp_wq[qidx];
+                                       goto pass_check;
+                               }
                        }
                }
                goto error_out;
                break;
        case LPFC_IDIAG_RQ:
                /* HDR queue */
-               if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+               if (phba->sli4_hba.hdr_rq &&
+                   phba->sli4_hba.hdr_rq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* DAT queue */
-               if (phba->sli4_hba.dat_rq->queue_id == queid) {
+               if (phba->sli4_hba.dat_rq &&
+                   phba->sli4_hba.dat_rq->queue_id == queid) {
                        /* Sanity check */
                        rc = lpfc_idiag_que_param_check(
                                        phba->sli4_hba.dat_rq, index, count);
index 445826a4c9814a1e77188c356538b14b4d1ade62..7afc757338de2de3137ed2643e5c4a1ac8701af1 100644 (file)
@@ -421,13 +421,13 @@ fail:
  * @vport: pointer to a host virtual N_Port data structure.
  *
  * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for FCoE only.
+ * the @vport. This mailbox command is necessary for SLI4 port only.
  *
  * Return code
  *   0 - successfully issued REG_VFI for @vport
  *   A failure code otherwise.
  **/
-static int
+int
 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 {
        struct lpfc_hba  *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
        int rc = 0;
 
        sp = &phba->fc_fabparam;
-       ndlp = lpfc_findnode_did(vport, Fabric_DID);
-       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-               rc = -ENODEV;
-               goto fail;
+       /* move forward in case of SLI4 FC port loopback test */
+       if ((phba->sli_rev == LPFC_SLI_REV4) &&
+           !(phba->link_flag & LS_LOOPBACK_MODE)) {
+               ndlp = lpfc_findnode_did(vport, Fabric_DID);
+               if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+                       rc = -ENODEV;
+                       goto fail;
+               }
        }
 
        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -486,6 +490,54 @@ fail:
        return rc;
 }
 
+/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued UNREG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+       struct lpfc_hba *phba = vport->phba;
+       struct Scsi_Host *shost;
+       LPFC_MBOXQ_t *mboxq;
+       int rc;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                               "2556 UNREG_VFI mbox allocation failed "
+                               "HBA state x%x\n", phba->pport->port_state);
+               return -ENOMEM;
+       }
+
+       lpfc_unreg_vfi(mboxq, vport);
+       mboxq->vport = vport;
+       mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                               "2557 UNREG_VFI issue mbox failed rc x%x "
+                               "HBA state x%x\n",
+                               rc, phba->pport->port_state);
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               return -EIO;
+       }
+
+       shost = lpfc_shost_from_vport(vport);
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag &= ~FC_VFI_REGISTERED;
+       spin_unlock_irq(shost->host_lock);
+       return 0;
+}
+
 /**
  * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
  * @vport: pointer to a host virtual N_Port data structure.
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                         "1816 FLOGI NPIV supported, "
                                         "response data 0x%x\n",
                                         sp->cmn.response_multiple_NPort);
+                       spin_lock_irq(&phba->hbalock);
                        phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+                       spin_unlock_irq(&phba->hbalock);
                } else {
                        /* Because we asked f/w for NPIV it still expects us
                        to call reg_vnpid atleast for the physcial host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                         LOG_ELS | LOG_VPORT,
                                         "1817 Fabric does not support NPIV "
                                         "- configuring single port mode.\n");
+                       spin_lock_irq(&phba->hbalock);
                        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+                       spin_unlock_irq(&phba->hbalock);
                }
        }
 
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        lpfc_do_scr_ns_plogi(phba, vport);
                } else if (vport->fc_flag & FC_VFI_REGISTERED)
                        lpfc_issue_init_vpi(vport);
-               else
+               else {
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                       "3135 Need register VFI: (x%x/%x)\n",
+                                       vport->fc_prevDID, vport->fc_myDID);
                        lpfc_issue_reg_vfi(vport);
+               }
        }
        return 0;
 }
+
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
                 * alpa map would take too long otherwise.
                 */
-               if (phba->alpa_map[0] == 0) {
+               if (phba->alpa_map[0] == 0)
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
-                       if ((phba->sli_rev == LPFC_SLI_REV4) &&
-                           (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-                            (vport->fc_prevDID != vport->fc_myDID))) {
-                               if (vport->fc_flag & FC_VFI_REGISTERED)
-                                       lpfc_sli4_unreg_all_rpis(vport);
-                               lpfc_issue_reg_vfi(vport);
-                               lpfc_nlp_put(ndlp);
-                               goto out;
-                       }
+               if ((phba->sli_rev == LPFC_SLI_REV4) &&
+                   (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+                    (vport->fc_prevDID != vport->fc_myDID))) {
+                       if (vport->fc_flag & FC_VFI_REGISTERED)
+                               lpfc_sli4_unreg_all_rpis(vport);
+                       lpfc_issue_reg_vfi(vport);
+                       lpfc_nlp_put(ndlp);
+                       goto out;
                }
                goto flogifail;
        }
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        /* Setup CSPs accordingly for Fabric */
        sp->cmn.e_d_tov = 0;
        sp->cmn.w2.r_a_tov = 0;
+       sp->cmn.virtual_fabric_support = 0;
        sp->cls1.classValid = 0;
        sp->cls2.seqDelivery = 1;
        sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                icmd = &iocb->iocb;
-               if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
-                   icmd->un.elsreq64.bdl.ulpIoTag32) {
+               if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
                        ndlp = (struct lpfc_nodelist *)(iocb->context1);
                        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
                            (ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        if (did == FDMI_DID)
                retry = 1;
 
-       if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
+       if ((cmd == ELS_CMD_FLOGI) &&
            (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
            !lpfc_error_lost_link(irsp)) {
                /* FLOGI retry policy */
                retry = 1;
-               /* retry forever */
+               /* retry FLOGI forever */
                maxretry = 0;
                if (cmdiocb->retry >= 100)
                        delay = 5000;
                else if (cmdiocb->retry >= 32)
                        delay = 1000;
+       } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+               /* retry FDISCs every second up to devloss */
+               retry = 1;
+               maxretry = vport->cfg_devloss_tmo;
+               delay = 1000;
        }
 
        cmdiocb->retry++;
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        /*
         * The driver received a LOGO from the rport and has ACK'd it.
-        * At this point, the driver is done so release the IOCB and
-        * remove the ndlp reference.
+        * At this point, the driver is done so release the IOCB
         */
        lpfc_els_free_iocb(phba, cmdiocb);
-       lpfc_nlp_put(ndlp);
+
+       /*
+        * Remove the ndlp reference if it's a fabric node that has
+        * sent us an unsolicited LOGO.
+        */
+       if (ndlp->nlp_type & NLP_FABRIC)
+               lpfc_nlp_put(ndlp);
+
        return;
 }
 
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                            sizeof(struct lpfc_name));
 
                if (!rc) {
-                       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-                       if (!mbox)
+                       if (phba->sli_rev < LPFC_SLI_REV4) {
+                               mbox = mempool_alloc(phba->mbox_mem_pool,
+                                                    GFP_KERNEL);
+                               if (!mbox)
+                                       return 1;
+                               lpfc_linkdown(phba);
+                               lpfc_init_link(phba, mbox,
+                                              phba->cfg_topology,
+                                              phba->cfg_link_speed);
+                               mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+                               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                               mbox->vport = vport;
+                               rc = lpfc_sli_issue_mbox(phba, mbox,
+                                                        MBX_NOWAIT);
+                               lpfc_set_loopback_flag(phba);
+                               if (rc == MBX_NOT_FINISHED)
+                                       mempool_free(mbox, phba->mbox_mem_pool);
                                return 1;
-
-                       lpfc_linkdown(phba);
-                       lpfc_init_link(phba, mbox,
-                                      phba->cfg_topology,
-                                      phba->cfg_link_speed);
-                       mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
-                       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                       mbox->vport = vport;
-                       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-                       lpfc_set_loopback_flag(phba);
-                       if (rc == MBX_NOT_FINISHED) {
-                               mempool_free(mbox, phba->mbox_mem_pool);
+                       } else {
+                               /* abort the flogi coming back to ourselves
+                                * due to external loopback on the port.
+                                */
+                               lpfc_els_abort_flogi(phba);
+                               return 0;
                        }
-                       return 1;
                } else if (rc > 0) {    /* greater than */
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                        vport->fc_myDID = vport->fc_prevDID;
                        if (phba->sli_rev < LPFC_SLI_REV4)
                                lpfc_issue_fabric_reglogin(vport);
-                       else
+                       else {
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                       "3138 Need register VFI: (x%x/%x)\n",
+                                       vport->fc_prevDID, vport->fc_myDID);
                                lpfc_issue_reg_vfi(vport);
+                       }
                }
        }
        return 0;
@@ -6595,56 +6678,6 @@ dropit:
        phba->fc_stat.elsRcvDrop++;
 }
 
-/**
- * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: host virtual N_Port identifier.
- *
- * This routine finds a vport on a HBA (referred by @phba) through a
- * @vpi. The function walks the HBA's vport list and returns the address
- * of the vport with the matching @vpi.
- *
- * Return code
- *    NULL - No vport with the matching @vpi found
- *    Otherwise - Address to the vport with the matching @vpi.
- **/
-struct lpfc_vport *
-lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
-{
-       struct lpfc_vport *vport;
-       unsigned long flags;
-       int i = 0;
-
-       /* The physical ports are always vpi 0 - translate is unnecessary. */
-       if (vpi > 0) {
-               /*
-                * Translate the physical vpi to the logical vpi.  The
-                * vport stores the logical vpi.
-                */
-               for (i = 0; i < phba->max_vpi; i++) {
-                       if (vpi == phba->vpi_ids[i])
-                               break;
-               }
-
-               if (i >= phba->max_vpi) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-                                        "2936 Could not find Vport mapped "
-                                        "to vpi %d\n", vpi);
-                       return NULL;
-               }
-       }
-
-       spin_lock_irqsave(&phba->hbalock, flags);
-       list_for_each_entry(vport, &phba->port_list, listentry) {
-               if (vport->vpi == i) {
-                       spin_unlock_irqrestore(&phba->hbalock, flags);
-                       return vport;
-               }
-       }
-       spin_unlock_irqrestore(&phba->hbalock, flags);
-       return NULL;
-}
-
 /**
  * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
  * @phba: pointer to lpfc hba data structure.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        /* Setup CSPs accordingly for Fabric */
        sp->cmn.e_d_tov = 0;
        sp->cmn.w2.r_a_tov = 0;
+       sp->cmn.virtual_fabric_support = 0;
        sp->cls1.classValid = 0;
        sp->cls2.seqDelivery = 1;
        sp->cls3.seqDelivery = 1;
index 091f68e5cb70a0e4a327073943d66de63de43446..678a4b11059c3ad7feccadf7a8428d6436a3e218 100644 (file)
@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        mempool_free(pmb, phba->mbox_mem_pool);
 
+       /* don't perform discovery for SLI4 loopback diagnostic test */
+       if ((phba->sli_rev == LPFC_SLI_REV4) &&
+           !(phba->hba_flag & HBA_FCOE_MODE) &&
+           (phba->link_flag & LS_LOOPBACK_MODE))
+               return;
+
        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
        struct lpfc_vport *vport = mboxq->vport;
 
-       /* VFI not supported on interface type 0, just do the flogi */
-       if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
-           &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
+       /*
+        * VFI not supported on interface type 0, just do the flogi
+        * Also continue if the VFI is in use - just use the same one.
+        */
+       if (mboxq->u.mb.mbxStatus &&
+           (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+                       LPFC_SLI_INTF_IF_TYPE_0) &&
+           mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
                lpfc_printf_vlog(vport, KERN_ERR,
                                LOG_MBOX,
                                "2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
                        lpfc_disc_start(vport);
-                       goto fail_free_mem;
+                       goto out_free_mem;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-               goto fail_free_mem;
+               goto out_free_mem;
        }
        /* The VPI is implicitly registered when the VFI is registered */
        spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
        spin_unlock_irq(shost->host_lock);
 
+       /* In case SLI4 FC loopback test, we are ready */
+       if ((phba->sli_rev == LPFC_SLI_REV4) &&
+           (phba->link_flag & LS_LOOPBACK_MODE)) {
+               phba->link_state = LPFC_HBA_READY;
+               goto out_free_mem;
+       }
+
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /* For private loop just start discovery and we are done. */
                if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
-                   (phba->alpa_map[0] == 0) &&
                    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
                        /* Use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                }
        }
 
-fail_free_mem:
+out_free_mem:
        mempool_free(mboxq, phba->mbox_mem_pool);
        lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
        kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 {
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+       struct Scsi_Host *shost;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
+       shost = lpfc_shost_from_vport(vport);
        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                                "1309 Link Up Event npiv not supported in loop "
                                "topology\n");
                                /* Get Loop Map information */
-               if (bf_get(lpfc_mbx_read_top_il, la))
+               if (bf_get(lpfc_mbx_read_top_il, la)) {
+                       spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_LBIT;
+                       spin_unlock_irq(shost->host_lock);
+               }
 
                vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
                i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
        } else {
                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
                        if (phba->max_vpi && phba->cfg_enable_npiv &&
-                          (phba->sli_rev == 3))
+                          (phba->sli_rev >= LPFC_SLI_REV3))
                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
                }
                vport->fc_myDID = phba->fc_pref_DID;
+               spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_LBIT;
+               spin_unlock_irq(shost->host_lock);
        }
        spin_unlock_irq(&phba->hbalock);
 
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
                   LPFC_ATT_LINK_DOWN) {
                phba->fc_stat.LinkDown++;
-               if (phba->link_flag & LS_LOOPBACK_MODE) {
+               if (phba->link_flag & LS_LOOPBACK_MODE)
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1308 Link Down Event in loop back mode "
                                "x%x received "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
-               }
-               else {
+               else
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1305 Link Down Event x%x received "
                                "Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                                phba->pport->port_state, vport->fc_flag,
                                bf_get(lpfc_mbx_read_top_mm, la),
                                bf_get(lpfc_mbx_read_top_fa, la));
-               }
                lpfc_mbx_issue_link_down(phba);
        }
        if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;
+       struct Scsi_Host *shost;
 
        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                 * vport discovery */
                if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
                        lpfc_start_fdiscs(phba);
-               else
+               else {
+                       shost = lpfc_shost_from_vport(vport);
+                       spin_lock_irq(shost->host_lock);
                        vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+                       spin_unlock_irq(shost->host_lock);
+               }
                lpfc_do_scr_ns_plogi(phba, vport);
        }
 
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
        return ndlp;
 }
 
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer else
+ * return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_nodelist *ndlp;
+
+       spin_lock_irq(shost->host_lock);
+       ndlp = __lpfc_findnode_rpi(vport, rpi);
+       spin_unlock_irq(shost->host_lock);
+       return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ *    NULL - No vport with the matching @vpi found
+ *    Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+       struct lpfc_vport *vport;
+       unsigned long flags;
+       int i = 0;
+
+       /* The physical ports are always vpi 0 - translate is unnecessary. */
+       if (vpi > 0) {
+               /*
+                * Translate the physical vpi to the logical vpi.  The
+                * vport stores the logical vpi.
+                */
+               for (i = 0; i < phba->max_vpi; i++) {
+                       if (vpi == phba->vpi_ids[i])
+                               break;
+               }
+
+               if (i >= phba->max_vpi) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                                        "2936 Could not find Vport mapped "
+                                        "to vpi %d\n", vpi);
+                       return NULL;
+               }
+       }
+
+       spin_lock_irqsave(&phba->hbalock, flags);
+       list_for_each_entry(vport, &phba->port_list, listentry) {
+               if (vport->vpi == i) {
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       return vport;
+               }
+       }
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+       return NULL;
+}
+
 void
 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
@@ -5599,7 +5693,7 @@ out:
  *
  * This function frees memory associated with the mailbox command.
  */
-static void
+void
 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
        struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 int
 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 {
-       LPFC_MBOXQ_t *mbox;
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
 
-       /* Unregister VFI */
-       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       if (!mbox) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-                               "2556 UNREG_VFI mbox allocation failed"
-                               "HBA state x%x\n", phba->pport->port_state);
-               return -ENOMEM;
-       }
-
-       lpfc_unreg_vfi(mbox, phba->pport);
-       mbox->vport = phba->pport;
-       mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
-       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-       if (rc == MBX_NOT_FINISHED) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-                               "2557 UNREG_VFI issue mbox failed rc x%x "
-                               "HBA state x%x\n",
-                               rc, phba->pport->port_state);
-               mempool_free(mbox, phba->mbox_mem_pool);
-               return -EIO;
-       }
-
-       shost = lpfc_shost_from_vport(phba->pport);
-       spin_lock_irq(shost->host_lock);
-       phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
-       spin_unlock_irq(shost->host_lock);
-
-       return 0;
+       /* Unregister the physical port VFI */
+       rc = lpfc_issue_unreg_vfi(phba->pport);
+       return rc;
 }
 
 /**
index 046edc4ab35f7a858dc6c536b06c41c2cb3defe8..7245bead3755a03fd3e2f17a1d65034b1b98d348 100644 (file)
@@ -349,6 +349,12 @@ struct csp {
  * Word 1 Bit 31 in FLOGI response is clean address bit
  */
 #define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
 #ifdef __BIG_ENDIAN_BITFIELD
        uint16_t request_multiple_Nport:1;      /* FC Word 1, bit 31 */
        uint16_t randomOffset:1;        /* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
        uint8_t fabric_AL_PA;   /* If using a Fabric Assigned AL_PA */
 #endif
 
-#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_LOOP_PT  0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_PT_PT    0x02 /* Attempt pt-pt only */
 #define FLAGS_TOPOLOGY_MODE_LOOP     0x04 /* Attempt loop only */
 #define FLAGS_TOPOLOGY_MODE_PT_LOOP  0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
 #ifdef __BIG_ENDIAN_BITFIELD
        uint32_t rsvd1     : 19;  /* Reserved                             */
        uint32_t cdss      :  1;  /* Configure Data Security SLI          */
-       uint32_t rsvd2     :  3;  /* Reserved                             */
+       uint32_t casabt    :  1;  /* Configure async abts status notice   */
+       uint32_t rsvd2     :  2;  /* Reserved                             */
        uint32_t cbg       :  1;  /* Configure BlockGuard                 */
        uint32_t cmv       :  1;  /* Configure Max VPIs                   */
        uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
@@ -2839,14 +2846,16 @@ typedef struct {
        uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
        uint32_t cmv       :  1;  /* Configure Max VPIs                   */
        uint32_t cbg       :  1;  /* Configure BlockGuard                 */
-       uint32_t rsvd2     :  3;  /* Reserved                             */
+       uint32_t rsvd2     :  2;  /* Reserved                             */
+       uint32_t casabt    :  1;  /* Configure async abts status notice   */
        uint32_t cdss      :  1;  /* Configure Data Security SLI          */
        uint32_t rsvd1     : 19;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
        uint32_t rsvd3     : 19;  /* Reserved                             */
        uint32_t gdss      :  1;  /* Configure Data Security SLI          */
-       uint32_t rsvd4     :  3;  /* Reserved                             */
+       uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+       uint32_t rsvd4     :  2;  /* Reserved                             */
        uint32_t gbg       :  1;  /* Grant BlockGuard                     */
        uint32_t gmv       :  1;  /* Grant Max VPIs                       */
        uint32_t gcrp      :  1;  /* Grant Command Ring Polling           */
@@ -2866,7 +2875,8 @@ typedef struct {
        uint32_t gcrp      :  1;  /* Grant Command Ring Polling           */
        uint32_t gmv       :  1;  /* Grant Max VPIs                       */
        uint32_t gbg       :  1;  /* Grant BlockGuard                     */
-       uint32_t rsvd4     :  3;  /* Reserved                             */
+       uint32_t rsvd4     :  2;  /* Reserved                             */
+       uint32_t gasabt    :  1;  /* Grant async abts status notice       */
        uint32_t gdss      :  1;  /* Configure Data Security SLI          */
        uint32_t rsvd3     : 19;  /* Reserved                             */
 #endif
@@ -3465,6 +3475,7 @@ typedef struct {
 } ASYNCSTAT_FIELDS;
 #define ASYNC_TEMP_WARN                0x100
 #define ASYNC_TEMP_SAFE                0x101
+#define ASYNC_STATUS_CN                0x102
 
 /* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
    or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
index 98d21521f5391b149d41d709c28bcb4b0db79e93..e5bfa7f334e38c9a5f872225cd27bb82f9ac84fb 100644 (file)
@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
                struct {
                        uint32_t word0;
 #define lpfc_mbx_set_diag_lpbk_type_SHIFT      0
-#define lpfc_mbx_set_diag_lpbk_type_MASK       0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK       0x00000003
 #define lpfc_mbx_set_diag_lpbk_type_WORD       word0
 #define LPFC_DIAG_LOOPBACK_TYPE_DISABLE                0x0
 #define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL       0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL       0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES         0x2
 #define lpfc_mbx_set_diag_lpbk_link_num_SHIFT  16
 #define lpfc_mbx_set_diag_lpbk_link_num_MASK   0x0000003F
 #define lpfc_mbx_set_diag_lpbk_link_num_WORD   word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
 #define lpfc_init_vfi_hop_count_MASK   0x000000FF
 #define lpfc_init_vfi_hop_count_WORD   word4
 };
+#define MBX_VFI_IN_USE                 0x9F02
+
 
 struct lpfc_mbx_reg_vfi {
        uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_lnk_type_SHIFT                6
 #define lpfc_mbx_rd_conf_lnk_type_MASK         0x00000003
 #define lpfc_mbx_rd_conf_lnk_type_WORD         word2
+#define LPFC_LNK_TYPE_GE       0
+#define LPFC_LNK_TYPE_FC       1
 #define lpfc_mbx_rd_conf_lnk_ldv_SHIFT         8
 #define lpfc_mbx_rd_conf_lnk_ldv_MASK          0x00000001
 #define lpfc_mbx_rd_conf_lnk_ldv_WORD          word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
 #define wqe_la_SHIFT 3
 #define wqe_la_MASK  0x000000001
 #define wqe_la_WORD  word5
+#define wqe_xo_SHIFT   6
+#define wqe_xo_MASK    0x000000001
+#define wqe_xo_WORD    word5
 #define wqe_ls_SHIFT 7
 #define wqe_ls_MASK  0x000000001
 #define wqe_ls_WORD  word5
index 55bc4fc7376f5a753f08e2d8aa44b3846cc377e6..dfea2dada02c2aa798e19863f7949031a2133df6 100644 (file)
@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
 static void lpfc_free_sgl_list(struct lpfc_hba *);
 static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-       if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
-               && !(phba->lmt & LMT_1Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
-               && !(phba->lmt & LMT_2Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
-               && !(phba->lmt & LMT_4Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
-               && !(phba->lmt & LMT_8Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
-               && !(phba->lmt & LMT_10Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
-               && !(phba->lmt & LMT_16Gb))) {
-               /* Reset link speed to auto */
-               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-                       "1302 Invalid speed for this board: "
-                       "Reset link speed to auto: x%x\n",
-                       phba->cfg_link_speed);
-                       phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
-       }
-
        phba->link_state = LPFC_LINK_DOWN;
 
        /* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
-               lpfc_init_link(phba, pmb, phba->cfg_topology,
-                       phba->cfg_link_speed);
-               pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-               lpfc_set_loopback_flag(phba);
-               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-               if (rc != MBX_SUCCESS) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0454 Adapter failed to init, mbxCmd x%x "
-                               "INIT_LINK, mbxStatus x%x\n",
-                               mb->mbxCommand, mb->mbxStatus);
-
-                       /* Clear all interrupt enable conditions */
-                       writel(0, phba->HCregaddr);
-                       readl(phba->HCregaddr); /* flush */
-                       /* Clear all pending interrupts */
-                       writel(0xffffffff, phba->HAregaddr);
-                       readl(phba->HAregaddr); /* flush */
-                       phba->link_state = LPFC_HBA_ERROR;
-                       if (rc != MBX_BUSY)
-                               mempool_free(pmb, phba->mbox_mem_pool);
-                       return -EIO;
-               }
+               mempool_free(pmb, phba->mbox_mem_pool);
+               rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+               if (rc)
+                       return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -667,6 +627,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
  **/
 int
 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+       return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine issues the INIT_LINK mailbox command.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *              0 - success
+ *              Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+                              uint32_t flag)
 {
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
        mb = &pmb->u.mb;
        pmb->vport = vport;
 
-       lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+       if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+            !(phba->lmt & LMT_1Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+            !(phba->lmt & LMT_2Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+            !(phba->lmt & LMT_4Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+            !(phba->lmt & LMT_8Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+            !(phba->lmt & LMT_10Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+            !(phba->lmt & LMT_16Gb))) {
+               /* Reset link speed to auto */
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                       "1302 Invalid speed for this board:%d "
+                       "Reset link speed to auto.\n",
+                       phba->cfg_link_speed);
+                       phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+       }
+       lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-       lpfc_set_loopback_flag(phba);
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        uint32_t event_data;
        struct Scsi_Host *shost;
        uint32_t if_type;
-       struct lpfc_register portstat_reg;
+       struct lpfc_register portstat_reg = {0};
+       uint32_t reg_err1, reg_err2;
+       uint32_t uerrlo_reg, uemasklo_reg;
+       uint32_t pci_rd_rc1, pci_rd_rc2;
        int rc;
 
        /* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        if (!phba->cfg_enable_hba_reset)
                return;
 
-       /* Send an internal error event to mgmt application */
-       lpfc_board_errevt_to_mgmt(phba);
-
-       /* For now, the actual action for SLI4 device handling is not
-        * specified yet, just treated it as adaptor hardware failure
-        */
-       event_data = FC_REG_DUMP_EVENT;
-       shost = lpfc_shost_from_vport(vport);
-       fc_host_post_vendor_event(shost, fc_get_event_number(),
-                                 sizeof(event_data), (char *) &event_data,
-                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
+               pci_rd_rc1 = lpfc_readl(
+                               phba->sli4_hba.u.if_type0.UERRLOregaddr,
+                               &uerrlo_reg);
+               pci_rd_rc2 = lpfc_readl(
+                               phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+                               &uemasklo_reg);
+               /* consider PCI bus read error as pci_channel_offline */
+               if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+                       return;
                lpfc_sli4_offline_eratt(phba);
                break;
        case LPFC_SLI_INTF_IF_TYPE_2:
-               portstat_reg.word0 =
-                       readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+               pci_rd_rc1 = lpfc_readl(
+                               phba->sli4_hba.u.if_type2.STATUSregaddr,
+                               &portstat_reg.word0);
+               /* consider PCI bus read error as pci_channel_offline */
+               if (pci_rd_rc1 == -EIO)
+                       return;
+               reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+               reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
                        /* TODO: Register for Overtemp async events. */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2889 Port Overtemperature event, "
-                               "taking port\n");
+                               "taking port offline\n");
                        spin_lock_irq(&phba->hbalock);
                        phba->over_temp_state = HBA_OVER_TEMP;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli4_offline_eratt(phba);
-                       return;
+                       break;
                }
+               if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                   reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3143 Port Down: Firmware Restarted\n");
+               else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                        reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3144 Port Down: Debug Dump\n");
+               else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                        reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3145 Port Down: Provisioning\n");
                /*
                 * On error status condition, driver need to wait for port
                 * ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                if (!rc) {
                        /* need reset: attempt for port recovery */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2887 Port Error: Attempting "
-                                       "Port Recovery\n");
+                                       "2887 Reset Needed: Attempting Port "
+                                       "Recovery...\n");
                        lpfc_offline_prep(phba);
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        if (lpfc_online(phba) == 0) {
                                lpfc_unblock_mgmt_io(phba);
-                               return;
+                               /* don't report event on forced debug dump */
+                               if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                                   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                                       return;
+                               else
+                                       break;
                        }
                        /* fall through for not able to recover */
                }
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        default:
                break;
        }
+       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                       "3123 Report dump event to upper layer\n");
+       /* Send an internal error event to mgmt application */
+       lpfc_board_errevt_to_mgmt(phba);
+
+       event_data = FC_REG_DUMP_EVENT;
+       shost = lpfc_shost_from_vport(vport);
+       fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                 sizeof(event_data), (char *) &event_data,
+                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -2673,6 +2708,32 @@ lpfc_offline(struct lpfc_hba *phba)
        lpfc_destroy_vport_work_array(phba, vports);
 }
 
+/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the SCSI buffers in the system and updates the
+ * physical XRI assigned to each SCSI buffer, because these may change after a
+ * firmware reset.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+       struct lpfc_scsi_buf *sb, *sb_next;
+
+       spin_lock_irq(&phba->hbalock);
+       spin_lock(&phba->scsi_buf_list_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+               sb->cur_iocbq.sli4_xritag =
+                       phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock_irq(&phba->hbalock);
+       return 0;
+}
+
 /**
  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
        struct lpfc_rpi_hdr *rpi_hdr;
 
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-       /*
-        * If the SLI4 port supports extents, posting the rpi header isn't
-        * required.  Set the expected maximum count and let the actual value
-        * get set when extents are fully allocated.
-        */
-       if (!phba->sli4_hba.rpi_hdrs_in_use) {
-               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
                return rc;
-       }
        if (phba->sli4_hba.extents_in_use)
                return -EIO;
 
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  *     -ENOMEM - No available memory
  *      -EIO - The mailbox failed to complete successfully.
  **/
-static int
+int
 lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                rc = -EIO;
        } else {
                rd_config = &pmb->u.mqe.un.rd_config;
+               if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+                       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+                       phba->sli4_hba.lnk_info.lnk_tp =
+                               bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+                       phba->sli4_hba.lnk_info.lnk_no =
+                               bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                                       "3081 lnk_type:%d, lnk_numb:%d\n",
+                                       phba->sli4_hba.lnk_info.lnk_tp,
+                                       phba->sli4_hba.lnk_info.lnk_no);
+               } else
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "3082 Mailbox (x%x) returned ldv:x0\n",
+                                       bf_get(lpfc_mqe_command, &pmb->u.mqe));
                phba->sli4_hba.extents_in_use =
                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
                phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
                phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
        }
        kfree(phba->sli4_hba.fcp_wq);
+       phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
        lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
        phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
                phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
        }
        kfree(phba->sli4_hba.fcp_cq);
+       phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
        lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
        phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
                phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
        }
        kfree(phba->sli4_hba.fp_eq);
+       phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
        lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
        phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        phba->sli4_hba.els_wq = NULL;
 
        /* Release FCP work queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+       if (phba->sli4_hba.fcp_wq != NULL)
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+                    fcp_qidx++)
+                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
        kfree(phba->sli4_hba.fcp_wq);
        phba->sli4_hba.fcp_wq = NULL;
 
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
        /* Release FCP response complete queue */
        fcp_qidx = 0;
-       do
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-       while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       if (phba->sli4_hba.fcp_cq != NULL)
+               do
+                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+               while (++fcp_qidx < phba->cfg_fcp_eq_count);
        kfree(phba->sli4_hba.fcp_cq);
        phba->sli4_hba.fcp_cq = NULL;
 
        /* Release fast-path event queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-               lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+       if (phba->sli4_hba.fp_eq != NULL)
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++)
+                       lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
        kfree(phba->sli4_hba.fp_eq);
        phba->sli4_hba.fp_eq = NULL;
 
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.sp_eq->queue_id);
 
        /* Set up fast-path event queue */
+       if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3147 Fast-path EQs not allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_sp_eq;
+       }
        for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
                if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
                                        "allocated\n", fcp_eqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fp_eq;
                }
                rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.mbx_cq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0528 Mailbox CQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fp_eq;
        }
        rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.els_cq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0530 ELS CQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_mbx_cq;
        }
        rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.sp_eq->queue_id);
 
        /* Set up fast-path FCP Response Complete Queue */
+       if (!phba->sli4_hba.fcp_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3148 Fast-path FCP CQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_els_cq;
+       }
        fcp_cqidx = 0;
        do {
                if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0526 Fast-path FCP CQ (%d) not "
                                        "allocated\n", fcp_cqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fcp_cq;
                }
                if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.mbx_wq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0538 Slow-path MQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fcp_cq;
        }
        rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.els_wq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0536 Slow-path ELS WQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_mbx_wq;
        }
        rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.els_cq->queue_id);
 
        /* Set up fast-path FCP Work Queue */
+       if (!phba->sli4_hba.fcp_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3149 Fast-path FCP WQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_els_wq;
+       }
        for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
                if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0534 Fast-path FCP WQ (%d) not "
                                        "allocated\n", fcp_wqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fcp_wq;
                }
                rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0540 Receive Queue not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fcp_wq;
        }
 
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
        for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
                lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
        for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
        for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
                lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
        return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
        /* Unset ELS complete queue */
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
        /* Unset FCP response complete queue */
-       fcp_qidx = 0;
-       do {
-               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-       } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       if (phba->sli4_hba.fcp_cq) {
+               fcp_qidx = 0;
+               do {
+                       lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+               } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       }
        /* Unset fast-path event queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-               lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+       if (phba->sli4_hba.fp_eq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++)
+                       lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+       }
        /* Unset slow-path event queue */
        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7398,22 +7510,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-       struct pci_dev *pdev;
-
-       /* Obtain PCI device reference */
-       if (!phba->pcidev)
-               return;
-       else
-               pdev = phba->pcidev;
-
-       /* Free coherent DMA memory allocated */
-
-       /* Unmap I/O memory space */
-       iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-       iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-       iounmap(phba->sli4_hba.conf_regs_memmap_p);
+       uint32_t if_type;
+       if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 
-       return;
+       switch (if_type) {
+       case LPFC_SLI_INTF_IF_TYPE_0:
+               iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+               iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+               iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_2:
+               iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_1:
+       default:
+               dev_printk(KERN_ERR, &phba->pcidev->dev,
+                          "FATAL - unsupported SLI4 interface type - %d\n",
+                          if_type);
+               break;
+       }
 }
 
 /**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);
 
-       /* check for firmware upgrade or downgrade */
-       snprintf(file_name, 16, "%s.grp", phba->ModelName);
-       error = request_firmware(&fw, file_name, &phba->pcidev->dev);
-       if (!error) {
-               lpfc_write_firmware(phba, fw);
-               release_firmware(fw);
+       /* check for firmware upgrade or downgrade (if_type 2 only) */
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+           LPFC_SLI_INTF_IF_TYPE_2) {
+               snprintf(file_name, 16, "%s.grp", phba->ModelName);
+               error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+               if (!error) {
+                       lpfc_write_firmware(phba, fw);
+                       release_firmware(fw);
+               }
        }
 
        /* Check if there are static vports to be created. */
index 2ebc7d2540c0ea0183c5cb4be8e49725e23f4439..20336f09fb3cede76ddcc5e808a58d64c3e76b66 100644 (file)
@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                phba->sli_rev = LPFC_SLI_REV2;
        mb->un.varCfgPort.sli_mode = phba->sli_rev;
 
+       /* If this is an SLI3 port, configure async status notification. */
+       if (phba->sli_rev == LPFC_SLI_REV3)
+               mb->un.varCfgPort.casabt = 1;
+
        /* Now setup pcb */
        phba->pcb->type = TYPE_NATIVE_SLI2;
        phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
        reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
        reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+                       "3134 Register VFI, mydid:x%x, fcfi:%d, "
+                       " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+                       vport->fc_myDID,
+                       vport->phba->fcf.fcfi,
+                       vport->phba->sli4_hba.vfi_ids[vport->vfi],
+                       vport->phba->vpi_ids[vport->vpi],
+                       reg_vfi->wwn[0], reg_vfi->wwn[1]);
 }
 
 /**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
 }
 
 /**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
  * @phba: pointer to the hba structure containing.
  * @mbox: pointer to lpfc mbox command to initialize.
  *
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
  **/
 int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
-               struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 {
        struct lpfc_dmabuf *mp = NULL;
        MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
 
        if (!mp || !mp->virt) {
                kfree(mp);
-               /* dump_fcoe_param failed to allocate memory */
+               /* dump config region 23 failed to allocate memory */
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-                       "2569 lpfc_dump_fcoe_param: memory"
+                       "2569 lpfc dump config region 23: memory"
                        " allocation failed\n");
                return 1;
        }
index 10d5b5e4149999aeb493028c2ad0b6fba9885654..ade763d3930a7576e37b62285ab80b35869488aa 100644 (file)
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 {
        struct hbq_dmabuf *hbqbp;
 
-       hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+       hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
        if (!hbqbp)
                return NULL;
 
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 {
        struct hbq_dmabuf *dma_buf;
 
-       dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+       dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
        if (!dma_buf)
                return NULL;
 
index 2ddd02f7c60336130e90f8a0eb5f0ba230484451..e8bb0055994316a790db82111163295efa5ba7be 100644 (file)
@@ -782,6 +782,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        return NLP_STE_FREED_NODE;
 }
 
+static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+                       struct lpfc_nodelist *ndlp,
+                          void *arg, uint32_t evt)
+{
+       return ndlp->nlp_state;
+}
+
 static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           void *arg, uint32_t evt)
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
        lpfc_disc_illegal,              /* CMPL_ADISC      */
        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
        lpfc_device_rm_unused_node,     /* DEVICE_RM       */
-       lpfc_disc_illegal,              /* DEVICE_RECOVERY */
+       lpfc_device_recov_unused_node,  /* DEVICE_RECOVERY */
 
        lpfc_rcv_plogi_plogi_issue,     /* RCV_PLOGI   PLOGI_ISSUE    */
        lpfc_rcv_prli_plogi_issue,      /* RCV_PRLI        */
index 2e1e54e5c3ae89514ef1de659fe8963237ee41c9..c60f5d0b38696c022326c76e937f57e1d72a6626 100644 (file)
@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 
                        rrq_empty = list_empty(&phba->active_rrq_list);
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
-                       if (ndlp)
+                       if (ndlp) {
                                lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+                               lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+                       }
                        lpfc_release_scsi_buf_s4(phba, psb);
                        if (rrq_empty)
                                lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                        &lpfc_cmd->fcp_cmnd->fcp_lun);
 
-       memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+       memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+       memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
        if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                cmnd->result = err;
                goto out_fail_command;
        }
+       /*
+        * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+        * without waiting a bit then indicate that the device is busy.
+        */
+       if (cmnd->retries &&
+           time_before(jiffies, (cmnd->jiffies_at_alloc +
+                                 msecs_to_jiffies(LPFC_RETRY_PAUSE *
+                                                  cmnd->retries))))
+               return SCSI_MLQUEUE_DEVICE_BUSY;
        ndlp = rdata->pnode;
 
        if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
index ce645b20a6ad3eda03f03c5ccc1059406476d1f2..9075a08cf78155af9ee0a635453bbd6ff9512f31 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 
 struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
 
 #define list_remove_head(list, entry, type, member)            \
        do {                                                    \
@@ -102,7 +103,7 @@ struct fcp_cmnd {
 #define  WRITE_DATA      0x01  /* Bit 0 */
 #define  READ_DATA       0x02  /* Bit 1 */
 
-       uint8_t fcpCdb[16];     /* SRB cdb field is copied here */
+       uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
        uint32_t fcpDl;         /* Total transfer length */
 
 };
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
 
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
-
+#define LPFC_RETRY_PAUSE       300
 #define MDAC_DIRECT_CMD                  0x22
index 4d4104f38c9897d6be6414abf3db11c318c68e79..23a27592388cac6d7c72c9ee10163a54b5882a27 100644 (file)
@@ -89,15 +89,20 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 static uint32_t
 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 {
-       union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+       union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return -ENOMEM;
+       temp_wqe = q->qe[q->host_index].wqe;
+
        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        /* set consumption flag every once in a while */
-       if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+       if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 {
        uint32_t released = 0;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return 0;
+
        if (q->hba_index == index)
                return 0;
        do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 static uint32_t
 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 {
-       struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+       struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return -ENOMEM;
+       temp_mqe = q->qe[q->host_index].mqe;
+
        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 static uint32_t
 lpfc_sli4_mq_release(struct lpfc_queue *q)
 {
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return 0;
+
        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
 static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
-       struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+       struct lpfc_eqe *eqe;
+
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return NULL;
+       eqe = q->qe[q->hba_index].eqe;
 
        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return 0;
+
        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
        struct lpfc_cqe *cqe;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return NULL;
+
        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;
 
+       /* sanity check on queue memory */
+       if (unlikely(!q))
+               return 0;
        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
 {
-       struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
-       struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+       struct lpfc_rqe *temp_hrqe;
+       struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int put_index = hq->host_index;
 
+       /* sanity check on queue memory */
+       if (unlikely(!hq) || unlikely(!dq))
+               return -ENOMEM;
+       temp_hrqe = hq->qe[hq->host_index].rqe;
+       temp_drqe = dq->qe[dq->host_index].rqe;
+
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 static uint32_t
 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 {
+       /* sanity check on queue memory */
+       if (unlikely(!hq) || unlikely(!dq))
+               return 0;
+
        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3575,8 +3619,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
  * lpfc_reset_barrier - Make HBA ready for HBA reset
  * @phba: Pointer to HBA context object.
  *
- * This function is called before resetting an HBA. This
- * function requests HBA to quiesce DMAs before a reset.
+ * This function is called before resetting an HBA. This function is called
+ * with hbalock held and requests HBA to quiesce DMAs before a reset.
  **/
 void lpfc_reset_barrier(struct lpfc_hba *phba)
 {
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 {
        struct lpfc_sli *psli = &phba->sli;
        uint16_t cfg_value;
-       uint8_t qindx;
 
        /* Reset HBA */
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~(LPFC_PROCESS_LA);
        phba->fcf.fcf_flag = 0;
-       /* Clean up the child queue list for the CQs */
-       list_del_init(&phba->sli4_hba.mbx_wq->list);
-       list_del_init(&phba->sli4_hba.els_wq->list);
-       list_del_init(&phba->sli4_hba.hdr_rq->list);
-       list_del_init(&phba->sli4_hba.dat_rq->list);
-       list_del_init(&phba->sli4_hba.mbx_cq->list);
-       list_del_init(&phba->sli4_hba.els_cq->list);
-       for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
-               list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
-       qindx = 0;
-       do
-               list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
-       while (++qindx < phba->cfg_fcp_eq_count);
        spin_unlock_irq(&phba->hbalock);
 
        /* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
                              ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
        /* Perform FCoE PCI function reset */
+       lpfc_sli4_queue_destroy(phba);
        lpfc_pci_function_reset(phba);
 
        /* Restore PCI cmd register */
@@ -4339,6 +4370,11 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
                        spin_unlock_irq(&phba->hbalock);
                        done = 1;
+
+                       if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+                           (pmb->u.mb.un.varCfgPort.gasabt == 0))
+                               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "3110 Port did not grant ASABT\n");
                }
        }
        if (!done) {
@@ -4551,9 +4587,9 @@ lpfc_sli_hba_setup_error:
  * data structure.
  **/
 static int
-lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
-               LPFC_MBOXQ_t *mboxq)
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
 {
+       LPFC_MBOXQ_t *mboxq;
        struct lpfc_dmabuf *mp;
        struct lpfc_mqe *mqe;
        uint32_t data_length;
@@ -4565,10 +4601,16 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-       mqe = &mboxq->u.mqe;
-       if (lpfc_dump_fcoe_param(phba, mboxq))
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
                return -ENOMEM;
 
+       mqe = &mboxq->u.mqe;
+       if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+               rc = -ENOMEM;
+               goto out_free_mboxq;
+       }
+
        mp = (struct lpfc_dmabuf *) mboxq->context1;
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 
@@ -4596,19 +4638,25 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
        if (rc) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
-               return -EIO;
+               rc = -EIO;
+               goto out_free_mboxq;
        }
        data_length = mqe->un.mb_words[5];
        if (data_length > DMP_RGN23_SIZE) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
-               return -EIO;
+               rc = -EIO;
+               goto out_free_mboxq;
        }
 
        lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       return 0;
+       rc = 0;
+
+out_free_mboxq:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return rc;
 }
 
 /**
@@ -4706,7 +4754,6 @@ static int
 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *mboxq;
-       struct lpfc_mbx_read_config *rd_config;
        struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
        struct lpfc_controller_attribute *cntl_attr;
        struct lpfc_mbx_get_port_name *get_port_name;
@@ -4724,33 +4771,11 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;
-
        /* obtain link type and link number via READ_CONFIG */
-       lpfc_read_config(phba, mboxq);
-       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-       if (rc == MBX_SUCCESS) {
-               rd_config = &mboxq->u.mqe.un.rd_config;
-               if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
-                       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
-                       phba->sli4_hba.lnk_info.lnk_tp =
-                               bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
-                       phba->sli4_hba.lnk_info.lnk_no =
-                               bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
-                       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                                       "3081 lnk_type:%d, lnk_numb:%d\n",
-                                       phba->sli4_hba.lnk_info.lnk_tp,
-                                       phba->sli4_hba.lnk_info.lnk_no);
-                       goto retrieve_ppname;
-               } else
-                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                                       "3082 Mailbox (x%x) returned ldv:x0\n",
-                                       bf_get(lpfc_mqe_command,
-                                              &mboxq->u.mqe));
-       } else
-               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                               "3083 Mailbox (x%x) failed, status:x%x\n",
-                               bf_get(lpfc_mqe_command, &mboxq->u.mqe),
-                               bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+       lpfc_sli4_read_config(phba);
+       if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+               goto retrieve_ppname;
 
        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
        reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
@@ -4875,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
        lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
        lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
        fcp_eqidx = 0;
-       do
-               lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
-                                    LPFC_QUEUE_REARM);
-       while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+       if (phba->sli4_hba.fcp_cq) {
+               do
+                       lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+                                            LPFC_QUEUE_REARM);
+               while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+       }
        lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
-       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
-               lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
-                                    LPFC_QUEUE_REARM);
+       if (phba->sli4_hba.fp_eq) {
+               for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+                    fcp_eqidx++)
+                       lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+                                            LPFC_QUEUE_REARM);
+       }
 }
 
 /**
@@ -5457,6 +5487,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
        uint16_t count, base;
        unsigned long longs;
 
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
+               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
        if (phba->sli4_hba.extents_in_use) {
                /*
                 * The port supports resource extents. The XRI, VPI, VFI, RPI
@@ -5538,9 +5570,10 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
                 * need any action - just exit.
                 */
                if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
-                   LPFC_IDX_RSRC_RDY)
-                       return 0;
-
+                   LPFC_IDX_RSRC_RDY) {
+                       lpfc_sli4_dealloc_resource_identifiers(phba);
+                       lpfc_sli4_remove_rpis(phba);
+               }
                /* RPIs. */
                count = phba->sli4_hba.max_cfg_param.max_rpi;
                base = phba->sli4_hba.max_cfg_param.rpi_base;
@@ -5880,14 +5913,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        if (!mboxq)
                return -ENOMEM;
 
-       /*
-        * Continue initialization with default values even if driver failed
-        * to read FCoE param config regions
-        */
-       if (lpfc_sli4_read_fcoe_params(phba, mboxq))
-               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
-                       "2570 Failed to read FCoE parameters\n");
-
        /* Issue READ_REV to collect vpd and FW information. */
        vpd_size = SLI4_PAGE_SIZE;
        vpd = kzalloc(vpd_size, GFP_KERNEL);
@@ -5924,6 +5949,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                goto out_free_mbox;
        }
 
+       /*
+        * Continue initialization with default values even if driver failed
+        * to read FCoE param config regions, only read parameters if the
+        * board is FCoE
+        */
+       if (phba->hba_flag & HBA_FCOE_MODE &&
+           lpfc_sli4_read_fcoe_params(phba))
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+                       "2570 Failed to read FCoE parameters\n");
+
        /*
         * Retrieve sli4 device physical port name, failure of doing it
         * is considered as non-fatal.
@@ -6044,6 +6079,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                                "rc = x%x\n", rc);
                goto out_free_mbox;
        }
+       /* update physical xri mappings in the scsi buffers */
+       lpfc_scsi_buf_update(phba);
 
        /* Read the port's service parameters. */
        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6205,7 +6242,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                rc = 0;
                phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
                                        &mboxq->u.mqe.un.reg_fcfi);
+
+               /* Check if the port is configured to be disabled */
+               lpfc_sli_read_link_ste(phba);
        }
+
        /*
         * The port is ready, set the host's link state to LINK_DOWN
         * in preparation for link interrupts.
@@ -6213,10 +6254,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        spin_lock_irq(&phba->hbalock);
        phba->link_state = LPFC_LINK_DOWN;
        spin_unlock_irq(&phba->hbalock);
-       if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
-               rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
-               if (rc)
+       if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+           (phba->hba_flag & LINK_DISABLED)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+                               "3103 Adapter Link is disabled.\n");
+               lpfc_down_link(phba, mboxq);
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+                                       "3104 Adapter failed to issue "
+                                       "DOWN_LINK mbox cmd, rc:x%x\n", rc);
                        goto out_unset_queue;
+               }
+       } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+               /* don't perform init_link on SLI4 FC port loopback test */
+               if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+                       rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+                       if (rc)
+                               goto out_unset_queue;
+               }
        }
        mempool_free(mboxq, phba->mbox_mem_pool);
        return rc;
@@ -7487,6 +7543,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
        struct ulp_bde64 *bpl = NULL;
        struct ulp_bde64 bde;
        struct sli4_sge *sgl  = NULL;
+       struct lpfc_dmabuf *dmabuf;
        IOCB_t *icmd;
        int numBdes = 0;
        int i = 0;
@@ -7505,9 +7562,12 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
                 * have not been byteswapped yet so there is no
                 * need to swap them back.
                 */
-               bpl  = (struct ulp_bde64 *)
-                       ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+               if (piocbq->context3)
+                       dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+               else
+                       return xritag;
 
+               bpl  = (struct ulp_bde64 *)dmabuf->virt;
                if (!bpl)
                        return xritag;
 
@@ -7616,6 +7676,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        int numBdes, i;
        struct ulp_bde64 bde;
        struct lpfc_nodelist *ndlp;
+       uint32_t *pcmd;
+       uint32_t if_type;
 
        fip = phba->hba_flag & HBA_FIP_SUPPORT;
        /* The fcp commands will set command type */
@@ -7669,6 +7731,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                                iocbq->iocb.ulpCommand);
                        return IOCB_ERROR;
                }
+
                wqe->els_req.payload_len = xmit_len;
                /* Els_reguest64 has a TMO */
                bf_set(wqe_tmo, &wqe->els_req.wqe_com,
@@ -7683,9 +7746,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
                bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
                /* CCP CCPE PV PRI in word10 were set in the memcpy */
-               if (command_type == ELS_COMMAND_FIP) {
+               if (command_type == ELS_COMMAND_FIP)
                        els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
                                        >> LPFC_FIP_ELS_ID_SHIFT);
+               pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+                                       iocbq->context2)->virt);
+               if_type = bf_get(lpfc_sli_intf_if_type,
+                                       &phba->sli4_hba.sli_intf);
+               if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+                       if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+                               *pcmd == ELS_CMD_SCR ||
+                               *pcmd == ELS_CMD_PLOGI)) {
+                               bf_set(els_req64_sp, &wqe->els_req, 1);
+                               bf_set(els_req64_sid, &wqe->els_req,
+                                       iocbq->vport->fc_myDID);
+                               bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+                               bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+                                       phba->vpi_ids[phba->pport->vpi]);
+                       } else if (iocbq->context1) {
+                               bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+                               bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+                                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+                       }
                }
                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7704,6 +7786,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                /* The entire sequence is transmitted for this IOCB */
                xmit_len = total_len;
                cmnd = CMD_XMIT_SEQUENCE64_CR;
+               if (phba->link_flag & LS_LOOPBACK_MODE)
+                       bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
        case CMD_XMIT_SEQUENCE64_CR:
                /* word3 iocb=io_tag32 wqe=reserved */
                wqe->xmit_sequence.rsvd3 = 0;
@@ -7846,6 +7930,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
                bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+               pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+                                       iocbq->context2)->virt);
+               if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+                               bf_set(els_req64_sp, &wqe->els_req, 1);
+                               bf_set(els_req64_sid, &wqe->els_req,
+                                       iocbq->vport->fc_myDID);
+                               bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+                               bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+                                       phba->vpi_ids[phba->pport->vpi]);
+               }
                command_type = OTHER_COMMAND;
                break;
        case CMD_CLOSE_XRI_CN:
@@ -8037,6 +8131,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                 */
                if (piocb->iocb_flag & LPFC_IO_FCP)
                        piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+               if (unlikely(!phba->sli4_hba.fcp_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
@@ -8173,6 +8269,137 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
        return 0;
 }
 
+/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ *
+ * The driver calls this routine in response to a XRI ABORT CQE
+ * event from the port.  In this event, the driver is required to
+ * recover its login to the rport even though its login may be valid
+ * from the driver's perspective.  The failed ABTS notice from the
+ * port indicates the rport is not responding.
+ */
+static void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+                          struct lpfc_nodelist *ndlp)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_hba *phba;
+       unsigned long flags = 0;
+
+       shost = lpfc_shost_from_vport(vport);
+       phba = vport->phba;
+       if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+               lpfc_printf_log(phba, KERN_INFO,
+                       LOG_SLI, "3093 No rport recovery needed. "
+                       "rport in state 0x%x\n",
+                       ndlp->nlp_state);
+               return;
+       }
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "3094 Start rport recovery on shost id 0x%x "
+                       "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+                       "flags 0x%x\n",
+                       shost->host_no, ndlp->nlp_DID,
+                       vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+                       ndlp->nlp_flag);
+       /*
+        * The rport is not responding.  Don't attempt ADISC recovery.
+        * Remove the FCP-2 flag to force a PLOGI.
+        */
+       spin_lock_irqsave(shost->host_lock, flags);
+       ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       lpfc_disc_state_machine(vport, ndlp, NULL,
+                               NLP_EVT_DEVICE_RECOVERY);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       spin_lock_irqsave(shost->host_lock, flags);
+       ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       lpfc_disc_start(vport);
+}
+
+/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port.  The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession.  The abort could be originated by the
+ * driver or by the port.  The ABTS could have been for an ELS
+ * or FCP IO.  The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+                         struct lpfc_iocbq *iocbq)
+{
+       struct lpfc_nodelist *ndlp = NULL;
+       uint16_t rpi = 0, vpi = 0;
+       struct lpfc_vport *vport = NULL;
+
+       /* The rpi in the ulpContext is vport-sensitive. */
+       vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+       rpi = iocbq->iocb.ulpContext;
+
+       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                       "3092 Port generated ABTS async event "
+                       "on vpi %d rpi %d status 0x%x\n",
+                       vpi, rpi, iocbq->iocb.ulpStatus);
+
+       vport = lpfc_find_vport_by_vpid(phba, vpi);
+       if (!vport)
+               goto err_exit;
+       ndlp = lpfc_findnode_rpi(vport, rpi);
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+               goto err_exit;
+
+       if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+               lpfc_sli_abts_recover_port(vport, ndlp);
+       return;
+
+ err_exit:
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "3095 Event Context not found, no "
+                       "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+                       iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
+                       vpi, rpi);
+}
+
+/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port.  The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply.  The abort could be originated
+ * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+                          struct lpfc_nodelist *ndlp,
+                          struct sli4_wcqe_xri_aborted *axri)
+{
+       struct lpfc_vport *vport;
+
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+               lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                               "3115 Node Context not found, driver "
+                               "ignoring abts err event\n");
+       vport = ndlp->vport;
+       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                       "3116 Port generated FCP XRI ABORT event on "
+                       "vpi %d rpi %d xri x%x status 0x%x\n",
+                       ndlp->vport->vpi, ndlp->nlp_rpi,
+                       bf_get(lpfc_wcqe_xa_xri, axri),
+                       bf_get(lpfc_wcqe_xa_status, axri));
+
+       if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+               lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
 /**
  * lpfc_sli_async_event_handler - ASYNC iocb handler function
  * @phba: Pointer to HBA context object.
@@ -8192,63 +8419,58 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
 {
        IOCB_t *icmd;
        uint16_t evt_code;
-       uint16_t temp;
        struct temp_event temp_event_data;
        struct Scsi_Host *shost;
        uint32_t *iocb_w;
 
        icmd = &iocbq->iocb;
        evt_code = icmd->un.asyncstat.evt_code;
-       temp = icmd->ulpContext;
 
-       if ((evt_code != ASYNC_TEMP_WARN) &&
-               (evt_code != ASYNC_TEMP_SAFE)) {
+       switch (evt_code) {
+       case ASYNC_TEMP_WARN:
+       case ASYNC_TEMP_SAFE:
+               temp_event_data.data = (uint32_t) icmd->ulpContext;
+               temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+               if (evt_code == ASYNC_TEMP_WARN) {
+                       temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+                               "0347 Adapter is very hot, please take "
+                               "corrective action. temperature : %d Celsius\n",
+                               (uint32_t) icmd->ulpContext);
+               } else {
+                       temp_event_data.event_code = LPFC_NORMAL_TEMP;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+                               "0340 Adapter temperature is OK now. "
+                               "temperature : %d Celsius\n",
+                               (uint32_t) icmd->ulpContext);
+               }
+
+               /* Send temperature change event to applications */
+               shost = lpfc_shost_from_vport(phba->pport);
+               fc_host_post_vendor_event(shost, fc_get_event_number(),
+                       sizeof(temp_event_data), (char *) &temp_event_data,
+                       LPFC_NL_VENDOR_ID);
+               break;
+       case ASYNC_STATUS_CN:
+               lpfc_sli_abts_err_handler(phba, iocbq);
+               break;
+       default:
                iocb_w = (uint32_t *) icmd;
-               lpfc_printf_log(phba,
-                       KERN_ERR,
-                       LOG_SLI,
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0346 Ring %d handler: unexpected ASYNC_STATUS"
                        " evt_code 0x%x\n"
                        "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
                        "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
                        "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
                        "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
-                       pring->ringno,
-                       icmd->un.asyncstat.evt_code,
+                       pring->ringno, icmd->un.asyncstat.evt_code,
                        iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
                        iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
                        iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
                        iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
 
-               return;
-       }
-       temp_event_data.data = (uint32_t)temp;
-       temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
-       if (evt_code == ASYNC_TEMP_WARN) {
-               temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
-               lpfc_printf_log(phba,
-                               KERN_ERR,
-                               LOG_TEMP,
-                               "0347 Adapter is very hot, please take "
-                               "corrective action. temperature : %d Celsius\n",
-                               temp);
-       }
-       if (evt_code == ASYNC_TEMP_SAFE) {
-               temp_event_data.event_code = LPFC_NORMAL_TEMP;
-               lpfc_printf_log(phba,
-                               KERN_ERR,
-                               LOG_TEMP,
-                               "0340 Adapter temperature is OK now. "
-                               "temperature : %d Celsius\n",
-                               temp);
+               break;
        }
-
-       /* Send temperature change event to applications */
-       shost = lpfc_shost_from_vport(phba->pport);
-       fc_host_post_vendor_event(shost, fc_get_event_number(),
-               sizeof(temp_event_data), (char *) &temp_event_data,
-               LPFC_NL_VENDOR_ID);
-
 }
 
 
@@ -8823,12 +9045,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        IOCB_t *irsp = &rspiocb->iocb;
        uint16_t abort_iotag, abort_context;
-       struct lpfc_iocbq *abort_iocb;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-
-       abort_iocb = NULL;
+       struct lpfc_iocbq *abort_iocb = NULL;
 
        if (irsp->ulpStatus) {
+
+               /*
+                * Assume that the port already completed and returned, or
+                * will return the iocb. Just Log the message.
+                */
                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
 
@@ -8846,68 +9070,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                         */
                        abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
-               /*
-                *  If the iocb is not found in Firmware queue the iocb
-                *  might have completed already. Do not free it again.
-                */
-               if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                       if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
-                               spin_unlock_irq(&phba->hbalock);
-                               lpfc_sli_release_iocbq(phba, cmdiocb);
-                               return;
-                       }
-                       /* For SLI4 the ulpContext field for abort IOCB
-                        * holds the iotag of the IOCB being aborted so
-                        * the local abort_context needs to be reset to
-                        * match the aborted IOCBs ulpContext.
-                        */
-                       if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
-                               abort_context = abort_iocb->iocb.ulpContext;
-               }
-
                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
                                "0327 Cannot abort els iocb %p "
                                "with tag %x context %x, abort status %x, "
                                "abort code %x\n",
                                abort_iocb, abort_iotag, abort_context,
                                irsp->ulpStatus, irsp->un.ulpWord[4]);
-               /*
-                * make sure we have the right iocbq before taking it
-                * off the txcmplq and try to call completion routine.
-                */
-               if (!abort_iocb ||
-                   abort_iocb->iocb.ulpContext != abort_context ||
-                   (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
-                       spin_unlock_irq(&phba->hbalock);
-               else if (phba->sli_rev < LPFC_SLI_REV4) {
-                       /*
-                        * leave the SLI4 aborted command on the txcmplq
-                        * list and the command complete WCQE's XB bit
-                        * will tell whether the SGL (XRI) can be released
-                        * immediately or to the aborted SGL list for the
-                        * following abort XRI from the HBA.
-                        */
-                       list_del_init(&abort_iocb->list);
-                       if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
-                               abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
-                               pring->txcmplq_cnt--;
-                       }
 
-                       /* Firmware could still be in progress of DMAing
-                        * payload, so don't free data buffer till after
-                        * a hbeat.
-                        */
-                       abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
-                       abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
-                       spin_unlock_irq(&phba->hbalock);
-
-                       abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-                       abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
-                       (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
-               } else
-                       spin_unlock_irq(&phba->hbalock);
+               spin_unlock_irq(&phba->hbalock);
        }
-
        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
 }
@@ -9258,6 +9429,14 @@ void
 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
 {
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "3096 ABORT_XRI_CN completing on xri x%x "
+                       "original iotag x%x, abort cmd iotag x%x "
+                       "status 0x%x, reason 0x%x\n",
+                       cmdiocb->iocb.un.acxri.abortContextTag,
+                       cmdiocb->iocb.un.acxri.abortIoTag,
+                       cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+                       rspiocb->iocb.un.ulpWord[4]);
        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
 }
@@ -9771,7 +9950,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
                        phba->work_status[1] =
                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2885 Port Error Detected: "
+                                       "2885 Port Status Event: "
                                        "port status reg 0x%x, "
                                        "port smphr reg 0x%x, "
                                        "error 1=0x%x, error 2=0x%x\n",
@@ -10777,6 +10956,9 @@ static void
 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
                             struct lpfc_wcqe_release *wcqe)
 {
+       /* sanity check on queue memory */
+       if (unlikely(!phba->sli4_hba.els_wq))
+               return;
        /* Check for the slow-path ELS work queue */
        if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
                lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10866,6 +11048,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        uint32_t status, rq_id;
        unsigned long iflags;
 
+       /* sanity check on queue memory */
+       if (unlikely(!hrq) || unlikely(!drq))
+               return workposted;
+
        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
        else
@@ -11000,6 +11186,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 
        /* Search for completion queue pointer matching this cqid */
        speq = phba->sli4_hba.sp_eq;
+       /* sanity check on queue memory */
+       if (unlikely(!speq))
+               return;
        list_for_each_entry(childq, &speq->child_list, list) {
                if (childq->queue_id == cqid) {
                        cq = childq;
@@ -11241,12 +11430,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                return;
        }
 
+       if (unlikely(!phba->sli4_hba.fcp_cq)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "3146 Fast-path completion queues "
+                               "does not exist\n");
+               return;
+       }
        cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
        if (unlikely(!cq)) {
                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0367 Fast-path completion queue "
-                                       "does not exist\n");
+                                       "(%d) does not exist\n", fcp_cqidx);
                return;
        }
 
@@ -11417,6 +11612,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 
        /* Get to the EQ struct associated with this vector */
        fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+       if (unlikely(!fpeq))
+               return IRQ_NONE;
 
        /* Check device state for handling interrupt */
        if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11635,6 +11832,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
        uint16_t dmult;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+       /* sanity check on queue memory */
+       if (!eq)
+               return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
@@ -11751,6 +11951,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+       /* sanity check on queue memory */
+       if (!cq || !eq)
+               return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
@@ -11933,6 +12136,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+       /* sanity check on queue memory */
+       if (!mq || !cq)
+               return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12083,6 +12289,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
        struct dma_address *page;
 
+       /* sanity check on queue memory */
+       if (!wq || !cq)
+               return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12151,6 +12360,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
        wq->subtype = subtype;
        wq->host_index = 0;
        wq->hba_index = 0;
+       wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
 
        /* link the wq onto the parent cq child list */
        list_add_tail(&wq->list, &cq->child_list);
@@ -12174,6 +12384,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
 {
        uint32_t cnt;
 
+       /* sanity check on queue memory */
+       if (!rq)
+               return;
        cnt = lpfc_hbq_defs[qno]->entry_count;
 
        /* Recalc repost for RQs based on buffers initially posted */
@@ -12219,6 +12432,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+       /* sanity check on queue memory */
+       if (!hrq || !drq || !cq)
+               return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12420,6 +12636,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* sanity check on queue memory */
        if (!eq)
                return -ENODEV;
        mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12475,6 +12692,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* sanity check on queue memory */
        if (!cq)
                return -ENODEV;
        mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12528,6 +12746,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* sanity check on queue memory */
        if (!mq)
                return -ENODEV;
        mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12581,6 +12800,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* sanity check on queue memory */
        if (!wq)
                return -ENODEV;
        mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12634,6 +12854,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+       /* sanity check on queue memory */
        if (!hrq || !drq)
                return -ENODEV;
        mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -15252,45 +15473,42 @@ lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
  * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
  *
- * This function read region 23 and parse TLV for port status to
- * decide if the user disaled the port. If the TLV indicates the
- * port is disabled, the hba_flag is set accordingly.
+ * This function gets SLI3 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
  **/
-void
-lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
 {
        LPFC_MBOXQ_t *pmb = NULL;
        MAILBOX_t *mb;
-       uint8_t *rgn23_data = NULL;
-       uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+       uint32_t offset = 0;
        int rc;
 
+       if (!rgn23_data)
+               return 0;
+
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2600 lpfc_sli_read_serdes_param failed to"
-                       " allocate mailbox memory\n");
-               goto out;
+                               "2600 failed to allocate mailbox memory\n");
+               return 0;
        }
        mb = &pmb->u.mb;
 
-       /* Get adapter Region 23 data */
-       rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
-       if (!rgn23_data)
-               goto out;
-
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2601 lpfc_sli_read_link_ste failed to"
-                               " read config region 23 rc 0x%x Status 0x%x\n",
-                               rc, mb->mbxStatus);
+                                       "2601 failed to read config "
+                                       "region 23, rc 0x%x Status 0x%x\n",
+                                       rc, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /*
@@ -15303,13 +15521,96 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
                        mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
 
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
-                       rgn23_data + offset,
-                       mb->un.varDmp.word_cnt);
+                                      rgn23_data + offset,
+                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
 
-       data_size = offset;
-       offset = 0;
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI4 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+       LPFC_MBOXQ_t *mboxq = NULL;
+       struct lpfc_dmabuf *mp = NULL;
+       struct lpfc_mqe *mqe;
+       uint32_t data_length = 0;
+       int rc;
+
+       if (!rgn23_data)
+               return 0;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3105 failed to allocate mailbox memory\n");
+               return 0;
+       }
+
+       if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+               goto out;
+       mqe = &mboxq->u.mqe;
+       mp = (struct lpfc_dmabuf *) mboxq->context1;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (rc)
+               goto out;
+       data_length = mqe->un.mb_words[5];
+       if (data_length == 0)
+               goto out;
+       if (data_length > DMP_RGN23_SIZE) {
+               data_length = 0;
+               goto out;
+       }
+       lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       if (mp) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+       }
+       return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function read region 23 and parse TLV for port status to
+ * decide if the user disaled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+       uint8_t *rgn23_data = NULL;
+       uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+       uint32_t offset = 0;
+
+       /* Get adapter Region 23 data */
+       rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+       if (!rgn23_data)
+               goto out;
+
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+       else {
+               if_type = bf_get(lpfc_sli_intf_if_type,
+                                &phba->sli4_hba.sli_intf);
+               if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+                       goto out;
+               data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+       }
 
        if (!data_size)
                goto out;
@@ -15373,9 +15674,8 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
                        goto out;
                }
        }
+
 out:
-       if (pmb)
-               mempool_free(pmb, phba->mbox_mem_pool);
        kfree(rgn23_data);
        return;
 }
index d5cffd8af340fde2f0c1aa9f4ca34060c5f84f01..3f266e2c54e0d7f57131b91b4aa3bb1dd1dc9cf6 100644 (file)
@@ -291,7 +291,7 @@ struct lpfc_bmbx {
 #define LPFC_RQE_SIZE          8
 
 #define LPFC_EQE_DEF_COUNT     1024
-#define LPFC_CQE_DEF_COUNT      256
+#define LPFC_CQE_DEF_COUNT      1024
 #define LPFC_WQE_DEF_COUNT      256
 #define LPFC_MQE_DEF_COUNT      16
 #define LPFC_RQE_DEF_COUNT     512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
                        void __iomem *STATUSregaddr;
                        void __iomem *CTRLregaddr;
                        void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1            0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2            0x2
                        void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART            0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON         0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP           0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ            0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ            0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS           0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ            0x6
                } if_type2;
        } u;
 
index b0630e37f1ef45756e77f4db5cb87db5a6b09ddd..dd044d01a07fa91ca8781397b2c95a2f2e51fb84 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.27"
+#define LPFC_DRIVER_VERSION "8.3.28"
 #define LPFC_DRIVER_NAME               "lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME    "lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME    "lpfc:fp"
index cff6ca67415ccfc2e118e86cb4f33bd8ab53fd99..0fe188e66000da063c01feec76b557a075545258 100644 (file)
@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
                return NULL;
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+               if (port_iterator->load_flag & FC_UNLOADING)
+                       continue;
                if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
-                       if (!(port_iterator->load_flag & FC_UNLOADING))
-                               lpfc_printf_vlog(port_iterator, KERN_ERR,
-                                        LOG_VPORT,
+                       lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
                                         "1801 Create vport work array FAILED: "
                                         "cannot do scsi_host_get\n");
                        continue;
index af3a6af97cc7c0455f9304c1cabfb199642f71ac..ea2bde206f7f951ee66abe3baa2b2abd94b3980e 100644 (file)
@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
     ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
 
     if (instance->irq != SCSI_IRQ_NONE)
-       if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW, 
-                       "ncr5380", instance)) {
+       if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
            printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
                   instance->host_no, instance->irq);
            instance->irq = SCSI_IRQ_NONE;
index 8dc1b32918dd6d064027fd32159859864bc96e87..a01f0aa66f2082aeb6dfc16b335685f3600e708b 100644 (file)
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.20
+ *  mpi2.h Version:  02.00.22
  *
  *  Version History
  *  ---------------
@@ -69,6 +69,8 @@
  *  02-23-11  02.00.19  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
  *  03-09-11  02.00.20  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  05-25-11  02.00.21  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  08-24-11  02.00.22  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -94,7 +96,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x14)
+#define MPI2_HEADER_VERSION_UNIT            (0x16)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
 #define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR         (0x02)
 #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR      (0x03)
                                                /* IEEE Simple Element only */
-#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR   (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR   (0x03)
                                                /* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR   \
+       (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
 
 /****************************************************************************
 *  IEEE SGE operation Macros
index cfd95b4e3004705da33d9e27c4ac9b1d899a034a..3a023dad77a13348fc38e0c1f6fc56ddf961dcbe 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.19
+ *    mpi2_cnfg.h Version:  02.00.21
  *
  *  Version History
  *  ---------------
  *                      Added SASNotifyPrimitiveMasks field to
  *                      MPI2_CONFIG_PAGE_IOC_7.
  *  03-09-11  02.00.19  Fixed IO Unit Page 10 (to match the spec).
+ *  05-25-11  02.00.20  Cleaned up a few comments.
+ *  08-24-11  02.00.21  Marked the IO Unit Page 7 PowerManagementCapabilities
+ *                      for PCIe link as obsolete.
+ *                      Added SpinupFlags field containing a Disable Spin-up
+ *                      bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ *                      SAS IO Unit Page 4.
+
  *  --------------------------------------------------------------------------
  */
 
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
 #define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED    (0x00000400)
 #define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED    (0x00000200)
 #define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED    (0x00000100)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE    (0x00000008)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE    (0x00000004)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE    (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE    (0x00000004) /* obsolete */
 
 /* defines for IO Unit Page 7 IOCTemperatureUnits field */
 #define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT       (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
 {
     U8          MaxTargetSpinup;            /* 0x00 */
     U8          SpinupDelay;                /* 0x01 */
-    U16         Reserved1;                  /* 0x02 */
+       U8          SpinupFlags;                /* 0x02 */
+       U8          Reserved1;                  /* 0x03 */
 } MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
   Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
 
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG         (0x01)
+
 /*
  * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
  * one and check the value returned for NumPhys at runtime.
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
 
 /* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
 
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
-
 /* values for SAS Expander Page 1 DiscoveryInfo field */
 #define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED    (0x04)
 #define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE  (0x02)
 #define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES  (0x01)
 
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
 
 /****************************************************************************
 *   SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
 
 #define MPI2_SASPHY0_PAGEVERSION            (0x03)
 
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
 /* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
 
 /* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
 /* values for SAS PHY Page 0 Flags field */
 #define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC             (0x01)
 
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
 
 /* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
 
-/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
-
 
 /* SAS PHY Page 1 */
 
index 93d9b6956d052380e15453044f9a065a1c8842bd..9a925c07a9ec0c6e4f6c13b71b2654efc7099e95 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.17
+ *  mpi2_ioc.h Version:  02.00.19
  *
  *  Version History
  *  ---------------
  *                      Added Temperature Threshold Event.
  *                      Added Host Message Event.
  *                      Added Send Host Message request and reply.
+ *  05-25-11  02.00.18  For Extended Image Header, added
+ *                      MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ *                      MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ *                      Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ *  08-24-11  02.00.19  Added PhysicalPort field to
+ *                      MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ *                      Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
  *  --------------------------------------------------------------------------
  */
 
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
 {
     U16                     TaskTag;                        /* 0x00 */
     U8                      ReasonCode;                     /* 0x02 */
-    U8                      Reserved1;                      /* 0x03 */
+       U8                      PhysicalPort;                   /* 0x03 */
     U8                      ASC;                            /* 0x04 */
     U8                      ASCQ;                           /* 0x05 */
     U16                     DevHandle;                      /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
 #define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)
 
 /* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED         (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW                  (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA              (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER          (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION      (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT        (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES   (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID            (0x08)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX                 (MPI2_EXT_IMAGE_TYPE_MEGARAID)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED                                (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW                                         (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA                                     (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER                         (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION                     (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT                       (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES          (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID                           (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX                   \
+       (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)      /* deprecated */
 
 
 
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
 /* defines for the Feature field */
 #define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND       (0x01)
 #define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION   (0x02)
-#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK               (0x03)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK               (0x03) /* obsolete */
 #define MPI2_PM_CONTROL_FEATURE_IOC_SPEED               (0x04)
 #define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC    (0x80)
 #define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC    (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
 
 /* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
 /* Parameter1 indicates desired PCIe link speed using these defines */
-#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS            (0x00)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS            (0x01)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS            (0x02)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS            (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS            (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS            (0x02) /* obsolete */
 /* Parameter2 indicates desired PCIe link width using these defines */
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1                 (0x01)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2                 (0x02)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4                 (0x04)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8                 (0x08)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1                 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2                 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4                 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8                 (0x08) /* obsolete */
 /* Parameter3 and Parameter4 are reserved */
 
 /* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
index bd61a7b60a2bcf2c306517a0b1a45913b156c258..0601612b875add889302ae2630919c6851672d5a 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.05
+ *    mpi2_raid.h Version:  02.00.06
  *
  *  Version History
  *  ---------------
  *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
  *                      VolumeCreationFlags and marked the old one as obsolete.
  *  05-12-10  02.00.05  Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ *  08-24-10  02.00.06  Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ *                      related structures and defines.
+ *                      Added product-specific range to RAID Action values.
+
  *  --------------------------------------------------------------------------
  */
 
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
 #define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED  (0x20)
 #define MPI2_RAID_ACTION_START_RAID_FUNCTION        (0x21)
 #define MPI2_RAID_ACTION_STOP_RAID_FUNCTION         (0x22)
-
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK        (0x23)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC       (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC       (0xFF)
 
 /* RAID Volume Creation Structure */
 
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
   Mpi2RaidOnlineCapacityExpansion_t,
   MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
 
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+       U16                     SourceDevHandle;               /* 0x00 */
+       U16                     CandidateDevHandle;             /* 0x02 */
+       U32                     Flags;                          /* 0x04 */
+       U32                     Reserved1;                      /* 0x08 */
+       U32                     Reserved2;                      /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG      (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG    (0x00000001)
+
 
 /* RAID Volume Indicator Structure */
 
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
 #define MPI2_RAID_VOL_FLAGS_OP_RESYNC               (0x00000003)
 #define MPI2_RAID_VOL_FLAGS_OP_MDC                  (0x00000004)
 
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+       U8                      State;                          /* 0x00 */
+       U8                      Reserved1;                      /* 0x01 */
+       U16                     Reserved2;                      /* 0x02 */
+       U32                     GenericAttributes;              /* 0x04 */
+       U32                     OEMSpecificAttributes;          /* 0x08 */
+       U32                     Reserved3;                      /* 0x0C */
+       U32                     Reserved4;                      /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE           (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE       (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR            (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK           (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE    (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE      (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK        (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL         (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL        (0x00000001)
 
 /* RAID Action Reply ActionData union */
 typedef union _MPI2_RAID_ACTION_REPLY_DATA
 {
-    U32                     Word[5];
-    MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
-    U16                     VolDevHandle;
-    U8                      VolumeState;
-    U8                      PhysDiskNum;
+       U32                                     Word[5];
+       MPI2_RAID_VOL_INDICATOR                 RaidVolumeIndicator;
+       U16                                     VolDevHandle;
+       U8                                      VolumeState;
+       U8                                      PhysDiskNum;
+       MPI2_RAID_COMPATIBILITY_RESULT_STRUCT   RaidCompatibilityResult;
 } MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
   Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
 
index 2a4bceda364b13ae6591bd25343d2ccb3976b6e9..3cbe677c68860fc41fe9e06e2c30506f92f9f1d9 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.06
+ *    mpi2_tool.h Version:  02.00.07
  *
  *  Version History
  *  ---------------
@@ -25,6 +25,8 @@
  *  05-12-10  02.00.05  Added Diagnostic Data Upload tool.
  *  08-11-10  02.00.06  Added defines that were missing for Diagnostic Buffer
  *                      Post Request.
+ *  05-25-11  02.00.07  Added Flags field and related defines to
+ *                      MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
  *  --------------------------------------------------------------------------
  */
 
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
     U8                      DevIndex;                   /* 0x14 */
     U8                      Action;                     /* 0x15 */
     U8                      SGLFlags;                   /* 0x16 */
-    U8                      Reserved7;                  /* 0x17 */
+        U8                      Flags;                      /* 0x17 */
     U16                     TxDataLength;               /* 0x18 */
     U16                     RxDataLength;               /* 0x1A */
     U32                     Reserved8;                  /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
 
 /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE   (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK         (0x07)
 
 /* Toolbox ISTWI Read Write Tool reply message */
 typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
index beda04a8404b5b72579f90e7f870bb8b6aeb1a98..0b2c95583660f67a652217d8b5ca3f25bee24984 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/time.h>
+#include <linux/kthread.h>
 #include <linux/aer.h>
 
 #include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK   mpt_callbacks[MPT_MAX_CALLBACKS];
 
 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
 
+#define MAX_HBA_QUEUE_DEPTH    30000
+#define MAX_CHAIN_DEPTH                100000
 static int max_queue_depth = -1;
 module_param(max_queue_depth, int, 0);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
 module_param(disable_discovery, int, 0);
 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
 
-
-/* diag_buffer_enable is bitwise
- * bit 0 set = TRACE
- * bit 1 set = SNAPSHOT
- * bit 2 set = EXTENDED
- *
- * Either bit can be set, or both
- */
-static int diag_buffer_enable;
-module_param(diag_buffer_enable, int, 0);
-MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
-    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  *
@@ -120,9 +110,33 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
                ioc->fwfault_debug = mpt2sas_fwfault_debug;
        return 0;
 }
+
 module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
     param_get_int, &mpt2sas_fwfault_debug, 0644);
 
+/**
+ *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+               struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+               struct pci_dev *pdev;
+
+               if ((ioc == NULL))
+                       return -1;
+
+               pdev = ioc->pdev;
+               if ((pdev == NULL))
+                       return -1;
+               pci_remove_bus_device(pdev);
+               return 0;
+}
+
+
 /**
  * _base_fault_reset_work - workq handling ioc fault conditions
  * @work: input argument, used to derive ioc
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
        unsigned long    flags;
        u32 doorbell;
        int rc;
+       struct task_struct *p;
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
        doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+       if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+               printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+                       ioc->name, __func__);
+
+               /*
+                * Call _scsih_flush_pending_cmds callback so that we flush all
+                * pending commands back to OS. This call is required to aovid
+                * deadlock at block layer. Dead IOC will fail to do diag reset,
+                * and this call is safe since dead ioc will never return any
+                * command back from HW.
+                */
+               ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+               /*
+                * Set remove_host flag early since kernel thread will
+                * take some time to execute.
+                */
+               ioc->remove_host = 1;
+               /*Remove the Dead Host */
+               p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+                   "mpt2sas_dead_ioc_%d", ioc->id);
+               if (IS_ERR(p)) {
+                       printk(MPT2SAS_ERR_FMT
+                       "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+                       ioc->name, __func__);
+               } else {
+                   printk(MPT2SAS_ERR_FMT
+                       "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
+                       ioc->name, __func__);
+               }
+
+               return; /* don't rearm timer */
+       }
+
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                    FORCE_BIG_HAMMER);
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
        if (_base_check_enable_msix(ioc) != 0)
                goto try_ioapic;
 
-       ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+       ioc->reply_queue_count = min_t(int, ioc->cpu_count,
            ioc->msix_vector_count);
 
        entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RMS2LL040_BRANDING);
                        break;
+               case MPT2SAS_INTEL_RAMSDALE_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RAMSDALE_BRANDING);
+                       break;
                default:
                        break;
                }
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RS25GB008_BRANDING);
                        break;
+               case MPT2SAS_INTEL_RMS25JB080_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25JB080_BRANDING);
+                       break;
+               case MPT2SAS_INTEL_RMS25JB040_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25JB040_BRANDING);
+                       break;
+               case MPT2SAS_INTEL_RMS25KB080_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25KB080_BRANDING);
+                       break;
+               case MPT2SAS_INTEL_RMS25KB040_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25KB040_BRANDING);
+                       break;
                default:
                        break;
                }
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
                }
                if (ioc->chain_dma_pool)
                        pci_pool_destroy(ioc->chain_dma_pool);
-       }
-       if (ioc->chain_lookup) {
                free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
                ioc->chain_lookup = NULL;
        }
@@ -2330,9 +2396,7 @@ static int
 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 {
        struct mpt2sas_facts *facts;
-       u32 queue_size, queue_diff;
        u16 max_sge_elements;
-       u16 num_of_reply_frames;
        u16 chains_needed_per_io;
        u32 sz, total_sz, reply_post_free_sz;
        u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
                max_request_credit = (max_queue_depth < facts->RequestCredit)
                    ? max_queue_depth : facts->RequestCredit;
        else
-               max_request_credit = facts->RequestCredit;
+               max_request_credit = min_t(u16, facts->RequestCredit,
+                   MAX_HBA_QUEUE_DEPTH);
 
        ioc->hba_queue_depth = max_request_credit;
        ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        }
        ioc->chains_needed_per_io = chains_needed_per_io;
 
-       /* reply free queue sizing - taking into account for events */
-       num_of_reply_frames = ioc->hba_queue_depth + 32;
-
-       /* number of replies frames can't be a multiple of 16 */
-       /* decrease number of reply frames by 1 */
-       if (!(num_of_reply_frames % 16))
-               num_of_reply_frames--;
-
-       /* calculate number of reply free queue entries
-        *  (must be multiple of 16)
-        */
-
-       /* (we know reply_free_queue_depth is not a multiple of 16) */
-       queue_size = num_of_reply_frames;
-       queue_size += 16 - (queue_size % 16);
-       ioc->reply_free_queue_depth = queue_size;
-
-       /* reply descriptor post queue sizing */
-       /* this size should be the number of request frames + number of reply
-        * frames
-        */
-
-       queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
-       /* round up to 16 byte boundary */
-       if (queue_size % 16)
-               queue_size += 16 - (queue_size % 16);
-
-       /* check against IOC maximum reply post queue depth */
-       if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
-               queue_diff = queue_size -
-                   facts->MaxReplyDescriptorPostQueueDepth;
+       /* reply free queue sizing - taking into account for 64 FW events */
+       ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
-               /* round queue_diff up to multiple of 16 */
-               if (queue_diff % 16)
-                       queue_diff += 16 - (queue_diff % 16);
-
-               /* adjust hba_queue_depth, reply_free_queue_depth,
-                * and queue_size
-                */
-               ioc->hba_queue_depth -= (queue_diff / 2);
-               ioc->reply_free_queue_depth -= (queue_diff / 2);
-               queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+       /* align the reply post queue on the next 16 count boundary */
+       if (!ioc->reply_free_queue_depth % 16)
+               ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+       else
+               ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+                               32 - (ioc->reply_free_queue_depth % 16);
+       if (ioc->reply_post_queue_depth >
+           facts->MaxReplyDescriptorPostQueueDepth) {
+               ioc->reply_post_queue_depth = min_t(u16,
+                   (facts->MaxReplyDescriptorPostQueueDepth -
+                   (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+                   (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+               ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+               ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
        }
-       ioc->reply_post_queue_depth = queue_size;
+
 
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
            "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
            "depth(%d)\n", ioc->name, ioc->request,
            ioc->scsiio_depth));
 
-       /* loop till the allocation succeeds */
-       do {
-               sz = ioc->chain_depth * sizeof(struct chain_tracker);
-               ioc->chain_pages = get_order(sz);
-               ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
-                   GFP_KERNEL, ioc->chain_pages);
-               if (ioc->chain_lookup == NULL)
-                       ioc->chain_depth -= 100;
-       } while (ioc->chain_lookup == NULL);
+       ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+       sz = ioc->chain_depth * sizeof(struct chain_tracker);
+       ioc->chain_pages = get_order(sz);
+
+       ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+           GFP_KERNEL, ioc->chain_pages);
        ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
            ioc->request_sz, 16, 0);
        if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
        if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
                ioc->ioc_link_reset_in_progress = 1;
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->base_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
        request = mpt2sas_base_get_msg_frame(ioc, smid);
        ioc->base_cmds.smid = smid;
        memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->base_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
                mpi_request->EventMasks[i] =
                    cpu_to_le32(ioc->event_masks[i]);
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->base_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
        if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
                printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
                ioc->reply_free[i] = cpu_to_le32(reply_address);
 
        /* initialize reply queues */
-       _base_assign_reply_queues(ioc);
+       if (ioc->is_driver_loading)
+               _base_assign_reply_queues(ioc);
 
        /* initialize Reply Post Free Queue */
        reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 
 
        if (ioc->is_driver_loading) {
-
-
-
-               ioc->wait_for_discovery_to_complete =
-                   _base_determine_wait_on_discovery(ioc);
-               return r; /* scan_start and scan_finished support */
-       }
-
-
-       if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
-               if (ioc->manu_pg10.OEMIdentifier  == 0x80) {
+               if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+                   == 0x80) {
                        hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
                            MFG_PAGE10_HIDE_SSDS_MASK);
                        if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
                                ioc->mfg_pg10_hide_flag = hide_flag;
                }
+               ioc->wait_for_discovery_to_complete =
+                   _base_determine_wait_on_discovery(ioc);
+               return r; /* scan_start and scan_finished support */
        }
-
        r = _base_send_port_enable(ioc, sleep_flag);
        if (r)
                return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 
        r = mpt2sas_base_map_resources(ioc);
        if (r)
-               return r;
+               goto out_free_resources;
 
        if (ioc->is_warpdrive) {
                ioc->reply_post_host_index[0] =
index 3c3babc7d260ef0eaeed4fd1ba18278d50ee47fe..c7459fdc06cc746895c83f7eb865ec3cf49d76ce 100644 (file)
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "10.100.00.00"
-#define MPT2SAS_MAJOR_VERSION          10
+#define MPT2SAS_DRIVER_VERSION         "12.100.00.00"
+#define MPT2SAS_MAJOR_VERSION          12
 #define MPT2SAS_MINOR_VERSION          100
 #define MPT2SAS_BUILD_VERSION          00
 #define MPT2SAS_RELEASE_VERSION                00
 /*
  * Intel HBA branding
  */
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING    \
+                               "Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING    \
+                               "Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING    \
+                               "Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING    \
+                               "Intel(R) Integrated RAID Module RMS25KB040"
 #define MPT2SAS_INTEL_RMS2LL080_BRANDING       \
                                "Intel Integrated RAID Module RMS2LL080"
 #define MPT2SAS_INTEL_RMS2LL040_BRANDING       \
                                "Intel Integrated RAID Module RMS2LL040"
 #define MPT2SAS_INTEL_RS25GB008_BRANDING       \
                                "Intel(R) RAID Controller RS25GB008"
-
+#define MPT2SAS_INTEL_RAMSDALE_BRANDING        \
+                               "Intel 720 Series SSD"
 /*
  * Intel HBA SSDIDs
  */
+#define MPT2SAS_INTEL_RMS25JB080_SSDID         0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID         0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID         0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID         0x3519
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 #define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
-
+#define MPT2SAS_INTEL_RAMSDALE_SSDID           0x3700
 
 /*
  * HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
  * @percent_complete: resync percent complete
  * @direct_io_enabled: Whether direct io to PDs are allowed or not
  * @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
  * @max_lba: Maximum number of LBA in the volume
  * @stripe_sz: Stripe Size of the volume
  * @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
        u8      percent_complete;
        u8      direct_io_enabled;
        u8      stripe_exponent;
+       u8      block_exponent;
        u64     max_lba;
        u32     stripe_sz;
        u32     device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
        TM_MUTEX_ON = 1,
 };
 
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
 /**
  * struct MPT2SAS_ADAPTER - per adapter struct
  * @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
        resource_size_t **reply_post_host_index;
        u16             cpu_msix_table_sz;
        u32             ioc_reset_count;
+       MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 
        /* internal commands, callback index */
        u8              scsi_io_cb_idx;
index aabcb911706e11155bb886ec420051b9edb04e36..7fceb899029ed990b7b19dde1c22491ad29a4dc5 100644 (file)
@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
        _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
 #endif
 
+       init_completion(&ioc->ctl_cmds.done);
        switch (mpi_request->Function) {
        case MPI2_FUNCTION_SCSI_IO_REQUEST:
        case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
                timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
        else
                timeout = karg.timeout;
-       init_completion(&ioc->ctl_cmds.done);
        timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
            timeout*HZ);
        if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
                mpi_request->ProductSpecific[i] =
                        cpu_to_le32(ioc->product_specific[buffer_type][i]);
 
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->ctl_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
 
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->ctl_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
 
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->ctl_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
            MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 
index d570573b7963ec47179d15ec7ac613221f0ce4bc..193e33e28e497522a183d18f3336198a8cbd2b74 100644 (file)
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
 
 static ushort max_sectors = 0xFFFF;
 module_param(max_sectors, ushort, 0);
-MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192  default=8192");
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
 
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
        if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
             sas_device->sas_address_parent)) {
                _scsih_sas_device_remove(ioc, sas_device);
-               } else if (!sas_device->starget) {
-                       if (!ioc->is_driver_loading)
-                               mpt2sas_transport_port_remove(ioc,
-                               sas_device->sas_address,
-                           sas_device->sas_address_parent);
-                       _scsih_sas_device_remove(ioc, sas_device);
-               }
+       } else if (!sas_device->starget) {
+               /* When asyn scanning is enabled, its not possible to remove
+                * devices while scanning is turned on due to an oops in
+                * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+                */
+               if (!ioc->is_driver_loading)
+                       mpt2sas_transport_port_remove(ioc,
+                       sas_device->sas_address,
+                       sas_device->sas_address_parent);
+               _scsih_sas_device_remove(ioc, sas_device);
+       }
 }
 
 /**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        if (list_empty(&ioc->free_chain_list)) {
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-               printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
-                   ioc->name);
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+                       "available\n", ioc->name));
                return NULL;
        }
        chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
                spin_lock_irqsave(&ioc->sas_device_lock, flags);
                sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
                   sas_target_priv_data->sas_address);
-               if (sas_device)
+               if (sas_device && !sas_target_priv_data->num_luns)
                        sas_device->starget = NULL;
                spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
        }
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
        Mpi2ConfigReply_t mpi_reply;
        u16 sz;
        u8 num_pds, count;
-       u64 mb = 1024 * 1024;
-       u64 tb_2 = 2 * mb * mb;
-       u64 capacity;
-       u32 stripe_sz;
-       u8 i, stripe_exp;
+       unsigned long stripe_sz, block_sz;
+       u8 stripe_exp, block_exp;
+       u64 dev_max_lba;
 
        if (!ioc->is_warpdrive)
                return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
                            vol_pg0->PhysDisk[count].PhysDiskNum);
                        goto out_error;
                }
+               /* Disable direct I/O if member drive lba exceeds 4 bytes */
+               dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+               if (dev_max_lba >> 32) {
+                       printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+                           "disabled for the drive with handle(0x%04x) member"
+                           "handle (0x%04x) unsupported max lba 0x%016llx\n",
+                           ioc->name, raid_device->handle,
+                           le16_to_cpu(pd_pg0.DevHandle),
+                           (unsigned long long)dev_max_lba);
+                       goto out_error;
+               }
+
                raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
        }
 
        /*
         * Assumption for WD: Direct I/O is not supported if the volume is
-        * not RAID0, if the stripe size is not 64KB, if the block size is
-        * not 512 and if the volume size is >2TB
+        * not RAID0
         */
-       if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
-           le16_to_cpu(vol_pg0->BlockSize) != 512) {
+       if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
                printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
                    "for the drive with handle(0x%04x): type=%d, "
                    "s_sz=%uK, blk_size=%u\n", ioc->name,
                    raid_device->handle, raid_device->volume_type,
-                   le32_to_cpu(vol_pg0->StripeSize)/2,
+                   (le32_to_cpu(vol_pg0->StripeSize) *
+                   le16_to_cpu(vol_pg0->BlockSize)) / 1024,
                    le16_to_cpu(vol_pg0->BlockSize));
                goto out_error;
        }
 
-       capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
-           (le64_to_cpu(vol_pg0->MaxLBA) + 1);
-
-       if (capacity > tb_2) {
+       stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+       stripe_exp = find_first_bit(&stripe_sz, 32);
+       if (stripe_exp == 32) {
                printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
-               "for the drive with handle(0x%04x) since drive sz > 2TB\n",
-               ioc->name, raid_device->handle);
+               "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+                   ioc->name, raid_device->handle,
+                   (le32_to_cpu(vol_pg0->StripeSize) *
+                   le16_to_cpu(vol_pg0->BlockSize)) / 1024);
                goto out_error;
        }
-
-       stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
-       stripe_exp = 0;
-       for (i = 0; i < 32; i++) {
-               if (stripe_sz & 1)
-                       break;
-               stripe_exp++;
-               stripe_sz >>= 1;
-       }
-       if (i == 32) {
+       raid_device->stripe_exponent = stripe_exp;
+       block_sz = le16_to_cpu(vol_pg0->BlockSize);
+       block_exp = find_first_bit(&block_sz, 16);
+       if (block_exp == 16) {
                printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
-                   "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+                   "for the drive with handle(0x%04x) invalid block sz %u\n",
                    ioc->name, raid_device->handle,
-                   le32_to_cpu(vol_pg0->StripeSize)/2);
+                   le16_to_cpu(vol_pg0->BlockSize));
                goto out_error;
        }
-       raid_device->stripe_exponent = stripe_exp;
+       raid_device->block_exponent = block_exp;
        raid_device->direct_io_enabled = 1;
 
        printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 {
        u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
        u32 stripe_sz, stripe_exp;
-       u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
+       u8 num_pds, *cdb_ptr, i;
        u8 cdb0 = scmd->cmnd[0];
+       u64 v_llba;
 
        /*
         * Try Direct I/O to RAID memeber disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 
                if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
                        | cdb_ptr[5])) {
-                       io_size = scsi_bufflen(scmd) >> 9;
+                       io_size = scsi_bufflen(scmd) >>
+                           raid_device->block_exponent;
+                       i = (cdb0 < READ_16) ? 2 : 6;
                        /* get virtual lba */
-                       lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
-                           &cdb_ptr[6];
-                       tmp_ptr = (u8 *)&v_lba + 3;
-                       *tmp_ptr-- = *lba_ptr1++;
-                       *tmp_ptr-- = *lba_ptr1++;
-                       *tmp_ptr-- = *lba_ptr1++;
-                       *tmp_ptr = *lba_ptr1;
+                       v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
 
                        if (((u64)v_lba + (u64)io_size - 1) <=
                            (u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
                                        mpi_request->DevHandle =
                                                cpu_to_le16(raid_device->
                                                    pd_handle[column]);
-                                       tmp_ptr = (u8 *)&p_lba + 3;
-                                       *lba_ptr2++ = *tmp_ptr--;
-                                       *lba_ptr2++ = *tmp_ptr--;
-                                       *lba_ptr2++ = *tmp_ptr--;
-                                       *lba_ptr2 = *tmp_ptr;
+                                       (*(__be32 *)(&cdb_ptr[i])) =
+                                               cpu_to_be32(p_lba);
+                                       /*
+                                       * WD: To indicate this I/O is directI/O
+                                       */
+                                       _scsih_scsi_direct_io_set(ioc, smid, 1);
+                               }
+                       }
+               } else {
+                       io_size = scsi_bufflen(scmd) >>
+                           raid_device->block_exponent;
+                       /* get virtual lba */
+                       v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+
+                       if ((v_llba + (u64)io_size - 1) <=
+                           raid_device->max_lba) {
+                               stripe_sz = raid_device->stripe_sz;
+                               stripe_exp = raid_device->stripe_exponent;
+                               stripe_off = (u32) (v_llba & (stripe_sz - 1));
+
+                               /* Check whether IO falls within a stripe */
+                               if ((stripe_off + io_size) <= stripe_sz) {
+                                       num_pds = raid_device->num_pds;
+                                       p_lba = (u32)(v_llba >> stripe_exp);
+                                       stripe_unit = p_lba / num_pds;
+                                       column = p_lba % num_pds;
+                                       p_lba = (stripe_unit << stripe_exp) +
+                                           stripe_off;
+                                       mpi_request->DevHandle =
+                                               cpu_to_le16(raid_device->
+                                                   pd_handle[column]);
+                                       (*(__be64 *)(&cdb_ptr[2])) =
+                                           cpu_to_be64((u64)p_lba);
                                        /*
                                        * WD: To indicate this I/O is directI/O
                                        */
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
                scmd->result = DID_NO_CONNECT << 16;
                goto out;
        }
+       ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
        /*
         * WARPDRIVE: If direct_io is set then it is directIO,
         * the failed direct I/O should be redirected to volume
         */
-       if (_scsih_scsi_direct_io_get(ioc, smid)) {
+       if (_scsih_scsi_direct_io_get(ioc, smid) &&
+           ((ioc_status & MPI2_IOCSTATUS_MASK)
+           != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
                spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
                ioc->scsi_lookup[smid - 1].scmd = scmd;
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 
        xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
        scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
-       ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
                log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
        else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
                        scmd->result = DID_TRANSPORT_DISRUPTED << 16;
                        goto out;
                }
+               scmd->result = DID_SOFT_ERROR << 16;
+               break;
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
                        } else
                                sas_target_priv_data = NULL;
                        raid_device->responding = 1;
+                       spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                        starget_printk(KERN_INFO, raid_device->starget,
                            "handle(0x%04x), wwid(0x%016llx)\n", handle,
                            (unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
                         */
                        _scsih_init_warpdrive_properties(ioc, raid_device);
                        if (raid_device->handle == handle)
-                               goto out;
+                               return;
                        printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
                            raid_device->handle);
                        raid_device->handle = handle;
                        if (sas_target_priv_data)
                                sas_target_priv_data->handle = handle;
-                       goto out;
+                       return;
                }
        }
- out:
+
        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
 
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
        .can_queue                      = 1,
        .this_id                        = -1,
        .sg_tablesize                   = MPT2SAS_SG_DEPTH,
-       .max_sectors                    = 8192,
+       .max_sectors                    = 32767,
        .cmd_per_lun                    = 7,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
        ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
        ioc->logging_level = logging_level;
+       ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
        /* misc semaphores and spin locks */
        mutex_init(&ioc->reset_in_progress_mutex);
        spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
                            "for max_sectors, range is 64 to 8192. Assigning "
                            "value of 64.\n", ioc->name, max_sectors);
-               } else if (max_sectors > 8192) {
-                       shost->max_sectors = 8192;
+               } else if (max_sectors > 32767) {
+                       shost->max_sectors = 32767;
                        printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
                            "for max_sectors, range is 64 to 8192. Assigning "
-                           "default value of 8192.\n", ioc->name,
+                           "default value of 32767.\n", ioc->name,
                            max_sectors);
                } else {
                        shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_attach_fail;
        }
 
-       scsi_scan_host(shost);
        if (ioc->is_warpdrive) {
                if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
                        ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                }
        } else
                ioc->hide_drives = 0;
+       scsi_scan_host(shost);
 
-       _scsih_probe_devices(ioc);
        return 0;
 
  out_attach_fail:
index 230732241aa2ff86b4de8ec6aa953351f4a1d9ed..831047466a5a9865f0f8da94672dd9f449546eec 100644 (file)
@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
        dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
            "send to sas_addr(0x%016llx)\n", ioc->name,
            (unsigned long long)sas_address));
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->transport_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
            10*HZ);
 
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
        dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
            "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
            (unsigned long long)phy->identify.sas_address, phy->number));
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->transport_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
            10*HZ);
 
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
            "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
            (unsigned long long)phy->identify.sas_address, phy->number,
            phy_operation));
-       mpt2sas_base_put_smid_default(ioc, smid);
+
        init_completion(&ioc->transport_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
            10*HZ);
 
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
            "sending smp request\n", ioc->name, __func__));
 
-       mpt2sas_base_put_smid_default(ioc, smid);
        init_completion(&ioc->transport_cmds.done);
+       mpt2sas_base_put_smid_default(ioc, smid);
        timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
            10*HZ);
 
index 6465dae5883a9dcb44ddde60b9c5f6f98fafd89b..a2f1b3043dfbf88dfb2d3d491c8357aa6fc07d51 100644 (file)
@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                break;
        }
-       return -EINVAL;
+       return count;
 }
 
 static struct bin_attribute sysfs_fw_dump_attr = {
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                break;
        case 3:
                if (ha->optrom_state != QLA_SWRITING)
-                       return -ENOMEM;
+                       return -EINVAL;
 
                if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
            dev, adr, len, opt);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7074,
-                   "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
+                   "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
                    rval, dev, adr, opt, len, buf[8]);
                return -EIO;
        }
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
                        dev, adr, len, opt);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_info, vha, 0x7075,
-                   "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+                   "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
                    rval, dev, adr, opt, len);
                return -EIO;
        }
@@ -1971,8 +1971,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                            "Queue delete failed.\n");
        }
 
-       scsi_host_put(vha->host);
        ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+       scsi_host_put(vha->host);
        return 0;
 }
 
index 8b641a8a0c74aed39307c73036de328e9cd11fee..b1d0f936bf2d9530391ec532594a52aee3a154d2 100644 (file)
@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->ctx = ctx;
+       ctx->iocbs = 1;
 done:
        return sp;
 }
@@ -102,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 
        bsg_job->reply->reply_payload_rcv_len = 0;
 
-       if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
+       if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }
@@ -389,6 +390,20 @@ done:
        return rval;
 }
 
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+       uint16_t iocbs;
+
+       iocbs = 1;
+       if (dsds > 2) {
+               iocbs += (dsds - 2) / 5;
+               if ((dsds - 2) % 5)
+                       iocbs++;
+       }
+       return iocbs;
+}
+
 static int
 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 {
@@ -489,6 +504,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
        ct = sp->ctx;
        ct->type = SRB_CT_CMD;
        ct->name = "bsg_ct";
+       ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        ct->u.bsg_job = bsg_job;
 
        ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -1653,7 +1669,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
        }
 
        ql_dbg(ql_dbg_user, vha, 0x7000,
-           "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
+           "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
 
        switch (bsg_job->request->msgcode) {
        case FC_BSG_RPT_ELS:
index f3cddd5800c307e219bb8a8c6a84db569117df03..7c54624b5b1327216c2a7e435a868b91264b7c99 100644 (file)
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0116       |               |
+ * | Module Init and Probe        |       0x0116       | 0xfa           |
  * | Mailbox commands             |       0x112b       |               |
- * | Device Discovery             |       0x2083       |               |
- * | Queue Command and IO tracing |       0x302e       |     0x3008     |
+ * | Device Discovery             |       0x2084       |               |
+ * | Queue Command and IO tracing |       0x302f       | 0x3008,0x302d, |
+ * |                              |                    | 0x302e         |
  * | DPC Thread                   |       0x401c       |               |
- * | Async Events                 |       0x5059       |               |
- * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
- * | User Space Interactions      |       0x709d       |               |
- * | Task Management              |       0x8041       | 0x800b         |
+ * | Async Events                 |       0x5057       | 0x5052                |
+ * | Timer Routines               |       0x6011       | 0x600e,0x600f  |
+ * | User Space Interactions      |       0x709e       |               |
+ * | Task Management              |       0x803c       | 0x8025-0x8026  |
+ * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x900f       |               |
  * | Virtual Port                 |       0xa007       |               |
  * | ISP82XX Specific             |       0xb052       |               |
@@ -368,7 +370,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 
        memcpy(iter_reg, ha->fce, ntohl(fcec->size));
 
-       return iter_reg;
+       return (char *)iter_reg + ntohl(fcec->size);
 }
 
 static inline void *
@@ -1650,6 +1652,15 @@ qla81xx_fw_dump_failed:
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+       if (ql2xextended_error_logging == 1)
+               ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+       return (level & ql2xextended_error_logging) == level;
+}
+
 /*
  * This function is for formatting and logging debug information.
  * It is to be used when vha is available. It formats the message
@@ -1664,34 +1675,31 @@ qla81xx_fw_dump_failed:
  * msg:   The message to be displayed.
  */
 void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
-       char pbuf[QL_DBG_BUF_LEN];
-       va_list ap;
-       uint32_t len;
-       struct pci_dev *pdev = NULL;
-
-       memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-       va_start(ap, msg);
-
-       if ((level & ql2xextended_error_logging) == level) {
-               if (vha != NULL) {
-                       pdev = vha->hw->pdev;
-                       /* <module-name> <pci-name> <msg-id>:<host> Message */
-                       sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
-                           dev_name(&(pdev->dev)), id + ql_dbg_offset,
-                           vha->host_no);
-               } else
-                       sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-                           "0000:00:00.0", id + ql_dbg_offset);
-
-               len = strlen(pbuf);
-               vsprintf(pbuf+len, msg, ap);
-               pr_warning("%s", pbuf);
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+
+       if (!ql_mask_match(level))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (vha != NULL) {
+               const struct pci_dev *pdev = vha->hw->pdev;
+               /* <module-name> <pci-name> <msg-id>:<host> Message */
+               pr_warn("%s [%s]-%04x:%ld: %pV",
+                       QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+                       vha->host_no, &vaf);
+       } else {
+               pr_warn("%s [%s]-%04x: : %pV",
+                       QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
        }
 
-       va_end(ap);
+       va_end(va);
 
 }
 
@@ -1710,31 +1718,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
-       char pbuf[QL_DBG_BUF_LEN];
-       va_list ap;
-       uint32_t len;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+          const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
 
        if (pdev == NULL)
                return;
+       if (!ql_mask_match(level))
+               return;
 
-       memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-       va_start(ap, msg);
-
-       if ((level & ql2xextended_error_logging) == level) {
-               /* <module-name> <dev-name>:<msg-id> Message */
-               sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-                   dev_name(&(pdev->dev)), id + ql_dbg_offset);
+       va_start(va, fmt);
 
-               len = strlen(pbuf);
-               vsprintf(pbuf+len, msg, ap);
-               pr_warning("%s", pbuf);
-       }
+       vaf.fmt = fmt;
+       vaf.va = &va;
 
-       va_end(ap);
+       /* <module-name> <dev-name>:<msg-id> Message */
+       pr_warn("%s [%s]-%04x: : %pV",
+               QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
 
+       va_end(va);
 }
 
 /*
@@ -1751,47 +1755,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
-       char pbuf[QL_DBG_BUF_LEN];
-       va_list ap;
-       uint32_t len;
-       struct pci_dev *pdev = NULL;
-
-       memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-       va_start(ap, msg);
-
-       if (level <= ql_errlev) {
-               if (vha != NULL) {
-                       pdev = vha->hw->pdev;
-                       /* <module-name> <msg-id>:<host> Message */
-                       sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
-                           dev_name(&(pdev->dev)), id, vha->host_no);
-               } else
-                       sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-                           "0000:00:00.0", id);
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char pbuf[128];
 
-               len = strlen(pbuf);
-                       vsprintf(pbuf+len, msg, ap);
+       if (level > ql_errlev)
+               return;
 
-               switch (level) {
-               case 0: /* FATAL LOG */
-                       pr_crit("%s", pbuf);
-                       break;
-               case 1:
-                       pr_err("%s", pbuf);
-                       break;
-               case 2:
-                       pr_warn("%s", pbuf);
-                       break;
-               default:
-                       pr_info("%s", pbuf);
-                       break;
-               }
+       if (vha != NULL) {
+               const struct pci_dev *pdev = vha->hw->pdev;
+               /* <module-name> <msg-id>:<host> Message */
+               snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+                       QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+       } else {
+               snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+                       QL_MSGHDR, "0000:00:00.0", id);
+       }
+       pbuf[sizeof(pbuf) - 1] = 0;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       switch (level) {
+       case 0: /* FATAL LOG */
+               pr_crit("%s%pV", pbuf, &vaf);
+               break;
+       case 1:
+               pr_err("%s%pV", pbuf, &vaf);
+               break;
+       case 2:
+               pr_warn("%s%pV", pbuf, &vaf);
+               break;
+       default:
+               pr_info("%s%pV", pbuf, &vaf);
+               break;
        }
 
-       va_end(ap);
+       va_end(va);
 }
 
 /*
@@ -1809,43 +1813,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
-       char pbuf[QL_DBG_BUF_LEN];
-       va_list ap;
-       uint32_t len;
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+          const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char pbuf[128];
 
        if (pdev == NULL)
                return;
+       if (level > ql_errlev)
+               return;
 
-       memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-       va_start(ap, msg);
-
-       if (level <= ql_errlev) {
-               /* <module-name> <dev-name>:<msg-id> Message */
-               sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-                   dev_name(&(pdev->dev)), id);
-
-               len = strlen(pbuf);
-               vsprintf(pbuf+len, msg, ap);
-               switch (level) {
-               case 0: /* FATAL LOG */
-                       pr_crit("%s", pbuf);
-                       break;
-               case 1:
-                       pr_err("%s", pbuf);
-                       break;
-               case 2:
-                       pr_warn("%s", pbuf);
-                       break;
-               default:
-                       pr_info("%s", pbuf);
-                       break;
-               }
+       /* <module-name> <dev-name>:<msg-id> Message */
+       snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+                QL_MSGHDR, dev_name(&(pdev->dev)), id);
+       pbuf[sizeof(pbuf) - 1] = 0;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       switch (level) {
+       case 0: /* FATAL LOG */
+               pr_crit("%s%pV", pbuf, &vaf);
+               break;
+       case 1:
+               pr_err("%s%pV", pbuf, &vaf);
+               break;
+       case 2:
+               pr_warn("%s%pV", pbuf, &vaf);
+               break;
+       default:
+               pr_info("%s%pV", pbuf, &vaf);
+               break;
        }
 
-       va_end(ap);
+       va_end(va);
 }
 
 void
@@ -1858,20 +1863,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
        struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
        uint16_t __iomem *mbx_reg;
 
-       if ((level & ql2xextended_error_logging) == level) {
-
-               if (IS_QLA82XX(ha))
-                       mbx_reg = &reg82->mailbox_in[0];
-               else if (IS_FWI2_CAPABLE(ha))
-                       mbx_reg = &reg24->mailbox0;
-               else
-                       mbx_reg = MAILBOX_REG(ha, reg, 0);
+       if (!ql_mask_match(level))
+               return;
 
-               ql_dbg(level, vha, id, "Mailbox registers:\n");
-               for (i = 0; i < 6; i++)
-                       ql_dbg(level, vha, id,
-                           "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
-       }
+       if (IS_QLA82XX(ha))
+               mbx_reg = &reg82->mailbox_in[0];
+       else if (IS_FWI2_CAPABLE(ha))
+               mbx_reg = &reg24->mailbox0;
+       else
+               mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+       ql_dbg(level, vha, id, "Mailbox registers:\n");
+       for (i = 0; i < 6; i++)
+               ql_dbg(level, vha, id,
+                   "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
 }
 
 
@@ -1881,24 +1886,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
 {
        uint32_t cnt;
        uint8_t c;
-       if ((level & ql2xextended_error_logging) == level) {
-
-               ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
-                   "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
-               ql_dbg(level, vha, id, "----------------------------------"
-                   "----------------------------\n");
-
-               ql_dbg(level, vha, id, "");
-               for (cnt = 0; cnt < size;) {
-                       c = *b++;
-                       printk("%02x", (uint32_t) c);
-                       cnt++;
-                       if (!(cnt % 16))
-                               printk("\n");
-                       else
-                               printk("  ");
-               }
-               if (cnt % 16)
-                       ql_dbg(level, vha, id, "\n");
+
+       if (!ql_mask_match(level))
+               return;
+
+       ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
+           "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
+       ql_dbg(level, vha, id, "----------------------------------"
+           "----------------------------\n");
+
+       ql_dbg(level, vha, id, " ");
+       for (cnt = 0; cnt < size;) {
+               c = *b++;
+               printk("%02x", (uint32_t) c);
+               cnt++;
+               if (!(cnt % 16))
+                       printk("\n");
+               else
+                       printk("  ");
        }
+       if (cnt % 16)
+               ql_dbg(level, vha, id, "\n");
 }
index 98a377b990173503fcdc2c982f4ad68bb3efc222..5f1b6d9c3dcbac432546039629355973ccc3b52f 100644 (file)
@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
 };
 
 #define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK    0x1e400000
 
 #define ql_log_fatal           0 /* display fatal errors */
 #define ql_log_warn            1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
 
 extern int ql_errlev;
 
-void
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 
-void
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 
 /* Debug Levels */
 /* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
 #define ql_dbg_misc    0x00010000 /* For dumping everything that is not
                                    * not covered by upper categories
                                    */
-
-#define QL_DBG_BUF_LEN 512
index fcf052c50bf585db11d73d46a3ad43a06e5a09e3..a6a4eebce4a889c5bfc02c6e5420a469b2517f6f 100644 (file)
@@ -271,6 +271,7 @@ struct srb_iocb {
 struct srb_ctx {
        uint16_t type;
        char *name;
+       int iocbs;
        union {
                struct srb_iocb *iocb_cmd;
                struct fc_bsg_job *bsg_job;
@@ -2244,6 +2245,7 @@ struct isp_operations {
        int (*get_flash_version) (struct scsi_qla_host *, void *);
        int (*start_scsi) (srb_t *);
        int (*abort_isp) (struct scsi_qla_host *);
+       int (*iospace_config)(struct qla_hw_data*);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2978,10 +2980,6 @@ typedef struct scsi_qla_host {
        atomic_dec(&__vha->vref_count);                      \
 } while (0)
 
-
-#define qla_printk(level, ha, format, arg...) \
-       dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
-
 /*
  * qla2x00 local function return status codes
  */
index c0c11afb685c450e473faaaf711b00d8ec0d7d82..408679be8fdfa2d25c25ea6f04a430c14bbcbf38 100644 (file)
@@ -572,7 +572,7 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
     size_t, char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
 extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
 extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
index 37937aa3c3b8dc46a150137547314c5eeaadff9c..4aea4ae2330006b1cf180f0c6256a705019f2014 100644 (file)
@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
                    "GA_NXT Send SNS failed (%d).\n", rval);
        } else if (sns_cmd->p.gan_data[8] != 0x80 ||
            sns_cmd->p.gan_data[9] != 0x02) {
-               ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+               ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
                    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
                ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
                    sns_cmd->p.gan_data, 16);
index 54ea68cec4c58c3cd0ee80c48bfaeb3f96e637d4..1fa067e053d2877fad3a175099d1b9a38da06d4f 100644 (file)
@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->ctx = ctx;
+       ctx->iocbs = 1;
        ctx->u.iocb_cmd = iocb;
        iocb->free = qla2x00_ctx_sp_free;
 
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
        struct srb_ctx *ctx = sp->ctx;
 
        ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
-           "Async-%s timeout - portid=%02x%02x%02x.\n",
-           ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+           "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+           ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa);
 
        fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
                goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x2072,
-           "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
-           fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-           fcport->d_id.b.al_pa, fcport->login_retry);
+           "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+           "retries=%d.\n", sp->handle, fcport->loop_id,
+           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+           fcport->login_retry);
        return rval;
 
 done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
                goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x2070,
-           "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
-           fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-           fcport->d_id.b.al_pa);
+           "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+           sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+           fcport->d_id.b.area, fcport->d_id.b.al_pa);
        return rval;
 
 done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
                goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x206f,
-           "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
-           fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-           fcport->d_id.b.al_pa);
+           "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+           sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+           fcport->d_id.b.area, fcport->d_id.b.al_pa);
        return rval;
 
 done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
                goto done_free_sp;
 
        ql_dbg(ql_dbg_taskm, vha, 0x802f,
-           "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
-           fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-           fcport->d_id.b.al_pa);
+           "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+           sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+           fcport->d_id.b.area, fcport->d_id.b.al_pa);
        return rval;
 
 done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        set_bit(0, ha->req_qid_map);
        set_bit(0, ha->rsp_qid_map);
 
-       ql_log(ql_log_info, vha, 0x0040,
+       ql_dbg(ql_dbg_init, vha, 0x0040,
            "Configuring PCI space...\n");
        rval = ha->isp_ops->pci_config(vha);
        if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        }
 
        ha->isp_ops->get_flash_version(vha, req->ring);
-       ql_log(ql_log_info, vha, 0x0061,
+       ql_dbg(ql_dbg_init, vha, 0x0061,
            "Configure NVRAM parameters...\n");
 
        ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
                return QLA_FUNCTION_FAILED;
        }
 
-       ql_log(ql_log_info, vha, 0x0078,
+       ql_dbg(ql_dbg_init, vha, 0x0078,
            "Verifying loaded RISC code...\n");
 
        if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                        ha->flags.fce_enabled = 0;
                        goto try_eft;
                }
-               ql_log(ql_log_info, vha, 0x00c0,
+               ql_dbg(ql_dbg_init, vha, 0x00c0,
                    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
 
                fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
                            tc_dma);
                        goto cont_alloc;
                }
-               ql_log(ql_log_info, vha, 0x00c3,
+               ql_dbg(ql_dbg_init, vha, 0x00c3,
                    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
 
                eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
                }
                return;
        }
-       ql_log(ql_log_info, vha, 0x00c5,
+       ql_dbg(ql_dbg_init, vha, 0x00c5,
            "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
 
        ha->fw_dump_len = dump_size;
@@ -1929,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
                                        rval = qla84xx_init_chip(vha);
                                        if (rval != QLA_SUCCESS) {
                                                ql_log(ql_log_warn,
-                                                   vha, 0x8026,
+                                                   vha, 0x8007,
                                                    "Init chip failed.\n");
                                                break;
                                        }
@@ -1938,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
                                        cs84xx_time = jiffies - cs84xx_time;
                                        wtime += cs84xx_time;
                                        mtime += cs84xx_time;
-                                       ql_dbg(ql_dbg_taskm, vha, 0x8025,
+                                       ql_dbg(ql_dbg_taskm, vha, 0x8008,
                                            "Increasing wait time by %ld. "
                                            "New time %ld.\n", cs84xx_time,
                                            wtime);
@@ -1981,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
 
                /* Delay for a while */
                msleep(500);
-
-               ql_dbg(ql_dbg_taskm, vha, 0x8039,
-                   "fw_state=%x curr time=%lx.\n", state[0], jiffies);
        } while (1);
 
        ql_dbg(ql_dbg_taskm, vha, 0x803a,
            "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
            state[1], state[2], state[3], state[4], jiffies);
 
-       if (rval) {
+       if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
                ql_log(ql_log_warn, vha, 0x803b,
                    "Firmware ready **** FAILED ****.\n");
        }
@@ -2386,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
         * internal driver logging.
         */
        if (nv->host_p[0] & BIT_7)
-               ql2xextended_error_logging = 0x7fffffff;
+               ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
        ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
        /* Always load RISC code on non ISP2[12]00 chips. */
        if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4188,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        } else {
-               ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
+               ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+                      __func__);
        }
 
        return(status);
@@ -4638,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
        struct req_que *req = ha->req_q_map[0];
 
        ql_dbg(ql_dbg_init, vha, 0x008b,
-           "Loading firmware from flash (%x).\n", faddr);
+           "FW: Loading firmware from flash (%x).\n", faddr);
 
        rval = QLA_SUCCESS;
 
@@ -4836,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
                return QLA_FUNCTION_FAILED;
        }
 
-       ql_log(ql_log_info, vha, 0x0092,
-           "Loading via request-firmware.\n");
+       ql_dbg(ql_dbg_init, vha, 0x0092,
+           "FW: Loading via request-firmware.\n");
 
        rval = QLA_SUCCESS;
 
@@ -5425,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                if ((vha->device_flags & DFLG_NO_CABLE))
                        status = 0;
 
-               ql_log(ql_log_info, vha, 0x803d,
+               ql_log(ql_log_info, vha, 0x8000,
                    "Configure loop done, status = 0x%x.\n", status);
        }
 
@@ -5458,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                            ha->fce_dma, ha->fce_bufs, ha->fce_mb,
                            &ha->fce_bufs);
                        if (rval) {
-                               ql_log(ql_log_warn, vha, 0x803e,
+                               ql_log(ql_log_warn, vha, 0x8001,
                                    "Unable to reinitialize FCE (%d).\n",
                                    rval);
                                ha->flags.fce_enabled = 0;
@@ -5470,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                        rval = qla2x00_enable_eft_trace(vha,
                            ha->eft_dma, EFT_NUM_BUFFERS);
                        if (rval) {
-                               ql_log(ql_log_warn, vha, 0x803f,
+                               ql_log(ql_log_warn, vha, 0x8010,
                                    "Unable to reinitialize EFT (%d).\n",
                                    rval);
                        }
@@ -5478,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
        }
 
        if (!status) {
-               ql_dbg(ql_dbg_taskm, vha, 0x8040,
+               ql_dbg(ql_dbg_taskm, vha, 0x8011,
                    "qla82xx_restart_isp succeeded.\n");
 
                spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5496,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        } else {
-               ql_log(ql_log_warn, vha, 0x8041,
+               ql_log(ql_log_warn, vha, 0x8016,
                    "qla82xx_restart_isp **** FAILED ****.\n");
        }
 
@@ -5643,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
        if (priority < 0)
                return QLA_FUNCTION_FAILED;
 
+       if (IS_QLA82XX(vha->hw)) {
+               fcport->fcp_prio = priority & 0xf;
+               return QLA_SUCCESS;
+       }
+
        ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
-       if (ret == QLA_SUCCESS)
-               fcport->fcp_prio = priority;
-       else
+       if (ret == QLA_SUCCESS) {
+               if (fcport->fcp_prio != priority)
+                       ql_dbg(ql_dbg_user, vha, 0x709e,
+                           "Updated FCP_CMND priority - value=%d loop_id=%d "
+                           "port_id=%02x%02x%02x.\n", priority,
+                           fcport->loop_id, fcport->d_id.b.domain,
+                           fcport->d_id.b.area, fcport->d_id.b.al_pa);
+               fcport->fcp_prio = priority & 0xf;
+       } else
                ql_dbg(ql_dbg_user, vha, 0x704f,
-                   "Unable to activate fcp priority, ret=0x%x.\n", ret);
-
+                   "Unable to update FCP_CMND priority - ret=0x%x for "
+                   "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+                   fcport->d_id.b.domain, fcport->d_id.b.area,
+                   fcport->d_id.b.al_pa);
        return  ret;
 }
 
index a4b267e60a352b7cb72f620bd099ef19e12ddd13..55a96761b5a4bc8210b781c8060d5b4803cee0a4 100644 (file)
@@ -11,8 +11,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
 static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -467,6 +465,42 @@ queuing_error:
        return (QLA_FUNCTION_FAILED);
 }
 
+/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+       struct qla_hw_data *ha = vha->hw;
+       device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+       struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+       if (IS_QLA82XX(ha)) {
+               qla82xx_start_iocbs(vha);
+       } else {
+               /* Adjust ring index. */
+               req->ring_index++;
+               if (req->ring_index == req->length) {
+                       req->ring_index = 0;
+                       req->ring_ptr = req->ring;
+               } else
+                       req->ring_ptr++;
+
+               /* Set chip new ring index. */
+               if (ha->mqenable) {
+                       WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+                       RD_REG_DWORD(&ioreg->hccr);
+               } else if (IS_FWI2_CAPABLE(ha)) {
+                       WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+                       RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+               } else {
+                       WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+                               req->ring_index);
+                       RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+               }
+       }
+}
+
 /**
  * qla2x00_marker() - Send a marker IOCB to the firmware.
  * @ha: HA context
@@ -489,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
        mrk24 = NULL;
+       req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
@@ -515,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        }
        wmb();
 
-       qla2x00_isp_cmd(vha, req);
+       qla2x00_start_iocbs(vha, req);
 
        return (QLA_SUCCESS);
 }
@@ -536,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 }
 
 /**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
  *
- * Note: The caller must hold the hardware lock before calling this routine.
+ * Returns the number of IOCB entries needed to store @dsds.
  */
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
+inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
 {
-       struct qla_hw_data *ha = vha->hw;
-       device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-       struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+       uint16_t iocbs;
 
-       ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
-           "IOCB data:\n");
-       ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
-           (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
+       iocbs = 1;
+       if (dsds > 1) {
+               iocbs += (dsds - 1) / 5;
+               if ((dsds - 1) % 5)
+                       iocbs++;
+       }
+       return iocbs;
+}
 
-       /* Adjust ring index. */
-       req->ring_index++;
-       if (req->ring_index == req->length) {
-               req->ring_index = 0;
-               req->ring_ptr = req->ring;
-       } else
-               req->ring_ptr++;
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+       uint16_t tot_dsds)
+{
+       uint32_t *cur_dsd = NULL;
+       scsi_qla_host_t *vha;
+       struct qla_hw_data *ha;
+       struct scsi_cmnd *cmd;
+       struct  scatterlist *cur_seg;
+       uint32_t *dsd_seg;
+       void *next_dsd;
+       uint8_t avail_dsds;
+       uint8_t first_iocb = 1;
+       uint32_t dsd_list_len;
+       struct dsd_dma *dsd_ptr;
+       struct ct6_dsd *ctx;
 
-       /* Set chip new ring index. */
-       if (IS_QLA82XX(ha)) {
-               uint32_t dbval = 0x04 | (ha->portnum << 5);
+       cmd = sp->cmd;
 
-               /* write, read and verify logic */
-               dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-               if (ql2xdbwr)
-                       qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-               else {
-                       WRT_REG_DWORD(
-                               (unsigned long __iomem *)ha->nxdb_wr_ptr,
-                               dbval);
-                       wmb();
-                       while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-                               WRT_REG_DWORD((unsigned long __iomem *)
-                                       ha->nxdb_wr_ptr, dbval);
-                               wmb();
-                       }
-               }
-       } else if (ha->mqenable) {
-               /* Set chip new ring index. */
-               WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-               RD_REG_DWORD(&ioreg->hccr);
-       } else {
-               if (IS_FWI2_CAPABLE(ha)) {
-                       WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-                       RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+       /* Update entry type to indicate Command Type 3 IOCB */
+       *((uint32_t *)(&cmd_pkt->entry_type)) =
+               __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+       /* No data transfer */
+       if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+               cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+               return 0;
+       }
+
+       vha = sp->fcport->vha;
+       ha = vha->hw;
+
+       /* Set transfer direction */
+       if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+               cmd_pkt->control_flags =
+                   __constant_cpu_to_le16(CF_WRITE_DATA);
+               ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+       } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+               cmd_pkt->control_flags =
+                   __constant_cpu_to_le16(CF_READ_DATA);
+               ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+       }
+
+       cur_seg = scsi_sglist(cmd);
+       ctx = sp->ctx;
+
+       while (tot_dsds) {
+               avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+                   QLA_DSDS_PER_IOCB : tot_dsds;
+               tot_dsds -= avail_dsds;
+               dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+               dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+                   struct dsd_dma, list);
+               next_dsd = dsd_ptr->dsd_addr;
+               list_del(&dsd_ptr->list);
+               ha->gbl_dsd_avail--;
+               list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+               ctx->dsd_use_cnt++;
+               ha->gbl_dsd_inuse++;
+
+               if (first_iocb) {
+                       first_iocb = 0;
+                       dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+                       *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+                       *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+                       cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
-                       WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-                               req->ring_index);
-                       RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+                       *cur_dsd++ = cpu_to_le32(dsd_list_len);
+               }
+               cur_dsd = (uint32_t *)next_dsd;
+               while (avail_dsds) {
+                       dma_addr_t      sle_dma;
+
+                       sle_dma = sg_dma_address(cur_seg);
+                       *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+                       *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+                       *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+                       cur_seg = sg_next(cur_seg);
+                       avail_dsds--;
                }
        }
 
+       /* Null termination */
+       *cur_dsd++ =  0;
+       *cur_dsd++ = 0;
+       *cur_dsd++ = 0;
+       cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+       return 0;
 }
 
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
+ * for Command Type 6.
  *
 * @dsds: number of data segment descriptors needed
  *
- * Returns the number of IOCB entries needed to store @dsds.
+ * Returns the number of DSD lists needed to store @dsds.
  */
 inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+qla24xx_calc_dsd_lists(uint16_t dsds)
 {
-       uint16_t iocbs;
+       uint16_t dsd_lists = 0;
 
-       iocbs = 1;
-       if (dsds > 1) {
-               iocbs += (dsds - 1) / 5;
-               if ((dsds - 1) % 5)
-                       iocbs++;
-       }
-       return iocbs;
+       dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+       if (dsds % QLA_DSDS_PER_IOCB)
+               dsd_lists++;
+       return dsd_lists;
 }
 
+
 /**
  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
  * IOCB types.
@@ -945,6 +1031,7 @@ alloc_and_fill:
        *cur_dsd++ = 0;
        return 0;
 }
+
 static int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
@@ -1004,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                sle_dma = sg_dma_address(sg);
                ql_dbg(ql_dbg_io, vha, 0x300a,
                    "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-                   cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+                   i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
                    sp->cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1731,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
        uint32_t index, handle;
        request_t *pkt;
        uint16_t cnt, req_cnt;
+       struct srb_ctx *ctx;
 
        pkt = NULL;
        req_cnt = 1;
@@ -1759,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
 
+       /* Adjust entry-counts as needed. */
+       if (sp->ctx) {
+               ctx = sp->ctx;
+               req_cnt = ctx->iocbs;
+       }
+
 skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt) {
@@ -1792,42 +1886,6 @@ queuing_error:
        return pkt;
 }
 
-static void
-qla2x00_start_iocbs(srb_t *sp)
-{
-       struct qla_hw_data *ha = sp->fcport->vha->hw;
-       struct req_que *req = ha->req_q_map[0];
-       device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-       struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
-       if (IS_QLA82XX(ha)) {
-               qla82xx_start_iocbs(sp);
-       } else {
-               /* Adjust ring index. */
-               req->ring_index++;
-               if (req->ring_index == req->length) {
-                       req->ring_index = 0;
-                       req->ring_ptr = req->ring;
-               } else
-                       req->ring_ptr++;
-
-               /* Set chip new ring index. */
-               if (ha->mqenable) {
-                       WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-                       RD_REG_DWORD(&ioreg->hccr);
-               } else if (IS_QLA82XX(ha)) {
-                       qla82xx_start_iocbs(sp);
-               } else if (IS_FWI2_CAPABLE(ha)) {
-                       WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-                       RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-               } else {
-                       WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-                               req->ring_index);
-                       RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
-               }
-       }
-}
-
 static void
 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
@@ -2160,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
         ct_iocb->entry_count = entry_count;
 }
 
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+       int             ret, nseg;
+       unsigned long   flags;
+       struct scsi_cmnd *cmd;
+       uint32_t        *clr_ptr;
+       uint32_t        index;
+       uint32_t        handle;
+       uint16_t        cnt;
+       uint16_t        req_cnt;
+       uint16_t        tot_dsds;
+       struct device_reg_82xx __iomem *reg;
+       uint32_t dbval;
+       uint32_t *fcp_dl;
+       uint8_t additional_cdb_len;
+       struct ct6_dsd *ctx;
+       struct scsi_qla_host *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = NULL;
+       struct rsp_que *rsp = NULL;
+       char            tag[2];
+
+       /* Setup device pointers. */
+       ret = 0;
+       reg = &ha->iobase->isp82;
+       cmd = sp->cmd;
+       req = vha->req;
+       rsp = ha->rsp_q_map[0];
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       dbval = 0x04 | (ha->portnum << 5);
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req,
+                       rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x300c,
+                           "qla2x00_marker failed for cmd=%p.\n", cmd);
+                       return QLA_FUNCTION_FAILED;
+               }
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+               handle++;
+               if (handle == MAX_OUTSTANDING_COMMANDS)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+       if (index == MAX_OUTSTANDING_COMMANDS)
+               goto queuing_error;
+
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+       } else
+               nseg = 0;
+
+       tot_dsds = nseg;
+
+       if (tot_dsds > ql2xshiftctondsd) {
+               struct cmd_type_6 *cmd_pkt;
+               uint16_t more_dsd_lists = 0;
+               struct dsd_dma *dsd_ptr;
+               uint16_t i;
+
+               more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+               if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+                       ql_dbg(ql_dbg_io, vha, 0x300d,
+                           "Num of DSD list %d is than %d for cmd=%p.\n",
+                           more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+                           cmd);
+                       goto queuing_error;
+               }
+
+               if (more_dsd_lists <= ha->gbl_dsd_avail)
+                       goto sufficient_dsds;
+               else
+                       more_dsd_lists -= ha->gbl_dsd_avail;
+
+               for (i = 0; i < more_dsd_lists; i++) {
+                       dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+                       if (!dsd_ptr) {
+                               ql_log(ql_log_fatal, vha, 0x300e,
+                                   "Failed to allocate memory for dsd_dma "
+                                   "for cmd=%p.\n", cmd);
+                               goto queuing_error;
+                       }
+
+                       dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+                               GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+                       if (!dsd_ptr->dsd_addr) {
+                               kfree(dsd_ptr);
+                               ql_log(ql_log_fatal, vha, 0x300f,
+                                   "Failed to allocate memory for dsd_addr "
+                                   "for cmd=%p.\n", cmd);
+                               goto queuing_error;
+                       }
+                       list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+                       ha->gbl_dsd_avail++;
+               }
+
+sufficient_dsds:
+               req_cnt = 1;
+
+               if (req->cnt < (req_cnt + 2)) {
+                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+                               &reg->req_q_out[0]);
+                       if (req->ring_index < cnt)
+                               req->cnt = cnt - req->ring_index;
+                       else
+                               req->cnt = req->length -
+                                       (req->ring_index - cnt);
+               }
+
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+
+               ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+               if (!sp->ctx) {
+                       ql_log(ql_log_fatal, vha, 0x3010,
+                           "Failed to allocate ctx for cmd=%p.\n", cmd);
+                       goto queuing_error;
+               }
+               memset(ctx, 0, sizeof(struct ct6_dsd));
+               ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+                       GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+               if (!ctx->fcp_cmnd) {
+                       ql_log(ql_log_fatal, vha, 0x3011,
+                           "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+                       goto queuing_error_fcp_cmnd;
+               }
+
+               /* Initialize the DSD list and dma handle */
+               INIT_LIST_HEAD(&ctx->dsd_list);
+               ctx->dsd_use_cnt = 0;
+
+               if (cmd->cmd_len > 16) {
+                       additional_cdb_len = cmd->cmd_len - 16;
+                       if ((cmd->cmd_len % 4) != 0) {
+                               /* SCSI command bigger than 16 bytes must be
+                                * multiple of 4
+                                */
+                               ql_log(ql_log_warn, vha, 0x3012,
+                                   "scsi cmd len %d not multiple of 4 "
+                                   "for cmd=%p.\n", cmd->cmd_len, cmd);
+                               goto queuing_error_fcp_cmnd;
+                       }
+                       ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+               } else {
+                       additional_cdb_len = 0;
+                       ctx->fcp_cmnd_len = 12 + 16 + 4;
+               }
+
+               cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+               cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+               /* Zero out remaining portion of packet. */
+               /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+               clr_ptr = (uint32_t *)cmd_pkt + 2;
+               memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+               cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+               /* Set NPORT-ID and LUN number*/
+               cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+               cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+               cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+               cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+               cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+               /* Build IOCB segments */
+               if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+                       goto queuing_error_fcp_cmnd;
+
+               int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+               host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+               /* build FCP_CMND IU */
+               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+               if (cmd->sc_data_direction == DMA_TO_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 1;
+               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+                       ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+               /*
+                * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+                */
+               if (scsi_populate_tag_msg(cmd, tag)) {
+                       switch (tag[0]) {
+                       case HEAD_OF_QUEUE_TAG:
+                               ctx->fcp_cmnd->task_attribute =
+                                   TSK_HEAD_OF_QUEUE;
+                               break;
+                       case ORDERED_QUEUE_TAG:
+                               ctx->fcp_cmnd->task_attribute =
+                                   TSK_ORDERED;
+                               break;
+                       }
+               }
+
+               /* Populate the FCP_PRIO. */
+               if (ha->flags.fcp_prio_enabled)
+                       ctx->fcp_cmnd->task_attribute |=
+                           sp->fcport->fcp_prio << 3;
+
+               memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+               fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+                   additional_cdb_len);
+               *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+               cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+               cmd_pkt->fcp_cmnd_dseg_address[0] =
+                   cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+               cmd_pkt->fcp_cmnd_dseg_address[1] =
+                   cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+               sp->flags |= SRB_FCP_CMND_DMA_VALID;
+               cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+               /* Set total data segment count. */
+               cmd_pkt->entry_count = (uint8_t)req_cnt;
+               /* Specify response queue number where
+                * completion should happen
+                */
+               cmd_pkt->entry_status = (uint8_t) rsp->id;
+       } else {
+               struct cmd_type_7 *cmd_pkt;
+               req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+               if (req->cnt < (req_cnt + 2)) {
+                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+                           &reg->req_q_out[0]);
+                       if (req->ring_index < cnt)
+                               req->cnt = cnt - req->ring_index;
+                       else
+                               req->cnt = req->length -
+                                       (req->ring_index - cnt);
+               }
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+
+               cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+               cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+               /* Zero out remaining portion of packet. */
+               /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+               clr_ptr = (uint32_t *)cmd_pkt + 2;
+               memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+               cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+               /* Set NPORT-ID and LUN number*/
+               cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+               cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+               cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+               cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+               cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+               int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+               host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+                       sizeof(cmd_pkt->lun));
+
+               /*
+                * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+                */
+               if (scsi_populate_tag_msg(cmd, tag)) {
+                       switch (tag[0]) {
+                       case HEAD_OF_QUEUE_TAG:
+                               cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+                               break;
+                       case ORDERED_QUEUE_TAG:
+                               cmd_pkt->task = TSK_ORDERED;
+                               break;
+                       }
+               }
+
+               /* Populate the FCP_PRIO. */
+               if (ha->flags.fcp_prio_enabled)
+                       cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+               /* Load SCSI command packet. */
+               memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+               host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+               cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+               /* Build IOCB segments */
+               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+               /* Set total data segment count. */
+               cmd_pkt->entry_count = (uint8_t)req_cnt;
+               /* Specify response queue number where
+                * completion should happen.
+                */
+               cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+       }
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+       wmb();
+
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       sp->flags |= SRB_DMA_VALID;
+
+       /* Set chip new ring index. */
+       /* write, read and verify logic */
+       dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+       if (ql2xdbwr)
+               qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+       else {
+               WRT_REG_DWORD(
+                       (unsigned long __iomem *)ha->nxdb_wr_ptr,
+                       dbval);
+               wmb();
+               while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+                       WRT_REG_DWORD(
+                               (unsigned long __iomem *)ha->nxdb_wr_ptr,
+                               dbval);
+                       wmb();
+               }
+       }
+
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+       dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
+
+       if (sp->ctx) {
+               mempool_free(sp->ctx, ha->ctx_mempool);
+               sp->ctx = NULL;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return QLA_FUNCTION_FAILED;
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2196,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
                break;
        case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
-               qla24xx_ct_iocb(sp, pkt) :
-               qla2x00_ct_iocb(sp, pkt);
+                   qla24xx_ct_iocb(sp, pkt) :
+                   qla2x00_ct_iocb(sp, pkt);
                break;
        case SRB_ADISC_CMD:
                IS_FWI2_CAPABLE(ha) ?
@@ -2212,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
        }
 
        wmb();
-       qla2x00_start_iocbs(sp);
+       qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
 done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
index 7b91b290ffd6bd23c1b137a7a3c73074f3f1580a..e804585cc59cf60dc7affc4e45acd47dbecdafff 100644 (file)
@@ -242,32 +242,34 @@ static void
 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
        uint16_t        cnt;
+       uint32_t        mboxes;
        uint16_t __iomem *wptr;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
+       /* Read all mbox registers? */
+       mboxes = (1 << ha->mbx_count) - 1;
+       if (!ha->mcp)
+               ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERRROR.\n");
+       else
+               mboxes = ha->mcp->in_mb;
+
        /* Load return mailbox registers. */
        ha->flags.mbox_int = 1;
        ha->mailbox_out[0] = mb0;
+       mboxes >>= 1;
        wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
 
        for (cnt = 1; cnt < ha->mbx_count; cnt++) {
                if (IS_QLA2200(ha) && cnt == 8)
                        wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
-               if (cnt == 4 || cnt == 5)
+               if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
                        ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
-               else
+               else if (mboxes & BIT_0)
                        ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
 
                wptr++;
-       }
-
-       if (ha->mcp) {
-               ql_dbg(ql_dbg_async, vha, 0x5000,
-                   "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
-       } else {
-               ql_dbg(ql_dbg_async, vha, 0x5001,
-                   "MBX pointer ERROR.\n");
+               mboxes >>= 1;
        }
 }
 
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
                return;
 
        ql_dbg(ql_dbg_async, vha, 0x5022,
-           "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+           "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);
 
        rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
                break;
 
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
-               ql_log(ql_log_info, vha, 0x5009,
+               ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
 
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
                        ha->link_data_rate = mb[1];
                }
 
-               ql_log(ql_log_info, vha, 0x500a,
+               ql_dbg(ql_dbg_async, vha, 0x500a,
                    "LOOP UP detected (%s Gbps).\n", link_speed);
 
                vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
        case MBA_LOOP_DOWN:             /* Loop Down Event */
                mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
                mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
-               ql_log(ql_log_info, vha, 0x500b,
+               ql_dbg(ql_dbg_async, vha, 0x500b,
                    "LOOP DOWN detected (%x %x %x %x).\n",
                    mb[1], mb[2], mb[3], mbx);
 
@@ -519,7 +521,7 @@ skip_rio:
                break;
 
        case MBA_LIP_RESET:             /* LIP reset occurred */
-               ql_log(ql_log_info, vha, 0x500c,
+               ql_dbg(ql_dbg_async, vha, 0x500c,
                    "LIP reset occurred (%x).\n", mb[1]);
 
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
                if (IS_QLA2100(ha))
                        break;
 
-               ql_log(ql_log_info, vha, 0x500f,
+               ql_dbg(ql_dbg_async, vha, 0x500f,
                    "Configuration change detected: value=%x.\n", mb[1]);
 
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
            QLA_LOGIO_LOGIN_RETRIED : 0;
        if (mbx->entry_status) {
                ql_dbg(ql_dbg_async, vha, 0x5043,
-                   "Async-%s error entry - portid=%02x%02x%02x "
+                   "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
                    "entry-status=%x status=%x state-flag=%x "
-                   "status-flags=%x.\n",
-                   type, fcport->d_id.b.domain, fcport->d_id.b.area,
+                   "status-flags=%x.\n", type, sp->handle,
+                   fcport->d_id.b.domain, fcport->d_id.b.area,
                    fcport->d_id.b.al_pa, mbx->entry_status,
                    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
                    le16_to_cpu(mbx->status_flags));
 
-               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
                    (uint8_t *)mbx, sizeof(*mbx));
 
                goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
                status = 0;
        if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
                ql_dbg(ql_dbg_async, vha, 0x5045,
-                   "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
-                   type, fcport->d_id.b.domain, fcport->d_id.b.area,
-                   fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
+                   "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+                   type, sp->handle, fcport->d_id.b.domain,
+                   fcport->d_id.b.area, fcport->d_id.b.al_pa,
+                   le16_to_cpu(mbx->mb1));
 
                data[0] = MBS_COMMAND_COMPLETE;
                if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        ql_log(ql_log_warn, vha, 0x5046,
-           "Async-%s failed - portid=%02x%02x%02x status=%x "
-           "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
-           type, fcport->d_id.b.domain,
-           fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
-           le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+           "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+           "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+           status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
            le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
            le16_to_cpu(mbx->mb7));
 
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                        bsg_job->reply->result = DID_ERROR << 16;
                        bsg_job->reply->reply_payload_rcv_len = 0;
                }
-               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
                    (uint8_t *)pkt, sizeof(*pkt));
        } else {
                bsg_job->reply->result =  DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                                le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
 
                        ql_log(ql_log_info, vha, 0x503f,
-                           "ELS-CT pass-through-%s error comp_status-status=0x%x "
+                           "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                            "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
-                           type, comp_status, fw_status[1], fw_status[2],
+                           type, sp->handle, comp_status, fw_status[1], fw_status[2],
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->total_byte_count));
                        fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                }
                else {
                        ql_log(ql_log_info, vha, 0x5040,
-                           "ELS-CT pass-through-%s error comp_status-status=0x%x "
+                           "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                            "error subcode 1=0x%x error subcode 2=0x%x.\n",
-                           type, comp_status,
+                           type, sp->handle, comp_status,
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->error_subcode_1),
                            le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                QLA_LOGIO_LOGIN_RETRIED : 0;
        if (logio->entry_status) {
                ql_log(ql_log_warn, vha, 0x5034,
-                   "Async-%s error entry - "
+                   "Async-%s error entry - hdl=%x"
                    "portid=%02x%02x%02x entry-status=%x.\n",
-                   type, fcport->d_id.b.domain, fcport->d_id.b.area,
-                   fcport->d_id.b.al_pa, logio->entry_status);
-               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+                   type, sp->handle, fcport->d_id.b.domain,
+                   fcport->d_id.b.area, fcport->d_id.b.al_pa,
+                   logio->entry_status);
+               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
                    (uint8_t *)logio, sizeof(*logio));
 
                goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 
        if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
                ql_dbg(ql_dbg_async, vha, 0x5036,
-                   "Async-%s complete - portid=%02x%02x%02x "
-                   "iop0=%x.\n",
-                   type, fcport->d_id.b.domain, fcport->d_id.b.area,
-                   fcport->d_id.b.al_pa,
+                   "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+                   "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+                   fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    le32_to_cpu(logio->io_parameter[0]));
 
                data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        ql_dbg(ql_dbg_async, vha, 0x5037,
-           "Async-%s failed - portid=%02x%02x%02x comp=%x "
-           "iop0=%x iop1=%x.\n",
-           type, fcport->d_id.b.domain,
+           "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+           "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(logio->comp_status),
            le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 
        if (sts->entry_status) {
                ql_log(ql_log_warn, vha, 0x5038,
-                   "Async-%s error - entry-status(%x).\n",
-                   type, sts->entry_status);
+                   "Async-%s error - hdl=%x entry-status(%x).\n",
+                   type, sp->handle, sts->entry_status);
        } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
                ql_log(ql_log_warn, vha, 0x5039,
-                   "Async-%s error - completion status(%x).\n",
-                   type, sts->comp_status);
+                   "Async-%s error - hdl=%x completion status(%x).\n",
+                   type, sp->handle, sts->comp_status);
        } else if (!(le16_to_cpu(sts->scsi_status) &
            SS_RESPONSE_INFO_LEN_VALID)) {
                ql_log(ql_log_warn, vha, 0x503a,
-                   "Async-%s error - no response info(%x).\n",
-                   type, sts->scsi_status);
+                   "Async-%s error - hdl=%x no response info(%x).\n",
+                   type, sp->handle, sts->scsi_status);
        } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
                ql_log(ql_log_warn, vha, 0x503b,
-                   "Async-%s error - not enough response(%d).\n",
-                   type, sts->rsp_data_len);
+                   "Async-%s error - hdl=%x not enough response(%d).\n",
+                   type, sp->handle, sts->rsp_data_len);
        } else if (sts->data[3]) {
                ql_log(ql_log_warn, vha, 0x503c,
-                   "Async-%s error - response(%x).\n",
-                   type, sts->data[3]);
+                   "Async-%s error - hdl=%x response(%x).\n",
+                   type, sp->handle, sts->data[3]);
        } else {
                error = 0;
        }
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
                }
 
                if (pkt->entry_status != 0) {
-                       ql_log(ql_log_warn, vha, 0x5035,
-                           "Process error entry.\n");
-
                        qla2x00_error_entry(vha, rsp, pkt);
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 }
 
 static inline void
-
 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
     uint32_t sense_len, struct rsp_que *rsp)
 {
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
        if (sp->request_sense_length != 0)
                rsp->status_srb = sp;
 
-       ql_dbg(ql_dbg_io, vha, 0x301c,
-           "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
-           sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
-           cp->device->lun, cp);
-       if (sense_len)
+       if (sense_len) {
+               ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+                   "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+                   sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+                   cp);
                ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
                    cp->sense_buffer, sense_len);
+       }
 }
 
 struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
                        }
 
                        if (k != blocks_done) {
-                               qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+                               ql_log(ql_log_warn, vha, 0x302f,
                                    "unexpected tag values tag:lba=%x:%llx)\n",
                                    e_ref_tag, (unsigned long long)lba_s);
                                return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                sp = NULL;
 
        if (sp == NULL) {
-               ql_log(ql_log_warn, vha, 0x3017,
+               ql_dbg(ql_dbg_io, vha, 0x3017,
                    "Invalid status handle (0x%x).\n", sts->handle);
 
                if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        }
        cp = sp->cmd;
        if (cp == NULL) {
-               ql_log(ql_log_warn, vha, 0x3018,
+               ql_dbg(ql_dbg_io, vha, 0x3018,
                    "Command already returned (0x%x/%p).\n",
                    sts->handle, sp);
 
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                        par_sense_len -= rsp_info_len;
                }
                if (rsp_info_len > 3 && rsp_info[3]) {
-                       ql_log(ql_log_warn, vha, 0x3019,
+                       ql_dbg(ql_dbg_io, vha, 0x3019,
                            "FCP I/O protocol failure (0x%x/0x%x).\n",
                            rsp_info_len, rsp_info[3]);
 
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                        if (!lscsi_status &&
                            ((unsigned)(scsi_bufflen(cp) - resid) <
                             cp->underflow)) {
-                               ql_log(ql_log_warn, vha, 0x301a,
+                               ql_dbg(ql_dbg_io, vha, 0x301a,
                                    "Mid-layer underflow "
                                    "detected (0x%x of 0x%x bytes).\n",
                                    resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                cp->result = DID_OK << 16 | lscsi_status;
 
                if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-                       ql_log(ql_log_warn, vha, 0x301b,
+                       ql_dbg(ql_dbg_io, vha, 0x301b,
                            "QUEUE FULL detected.\n");
                        break;
                }
@@ -1735,7 +1733,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                scsi_set_resid(cp, resid);
                if (scsi_status & SS_RESIDUAL_UNDER) {
                        if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
-                               ql_log(ql_log_warn, vha, 0x301d,
+                               ql_dbg(ql_dbg_io, vha, 0x301d,
                                    "Dropped frame(s) detected "
                                    "(0x%x of 0x%x bytes).\n",
                                    resid, scsi_bufflen(cp));
@@ -1747,7 +1745,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                        if (!lscsi_status &&
                            ((unsigned)(scsi_bufflen(cp) - resid) <
                            cp->underflow)) {
-                               ql_log(ql_log_warn, vha, 0x301e,
+                               ql_dbg(ql_dbg_io, vha, 0x301e,
                                    "Mid-layer underflow "
                                    "detected (0x%x of 0x%x bytes).\n",
                                    resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                break;
                        }
                } else {
-                       ql_log(ql_log_warn, vha, 0x301f,
+                       ql_dbg(ql_dbg_io, vha, 0x301f,
                            "Dropped frame(s) detected (0x%x "
                            "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
 
@@ -1774,7 +1772,7 @@ check_scsi_status:
                 */
                if (lscsi_status != 0) {
                        if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-                               ql_log(ql_log_warn, vha, 0x3020,
+                               ql_dbg(ql_dbg_io, vha, 0x3020,
                                    "QUEUE FULL detected.\n");
                                logit = 1;
                                break;
@@ -1838,10 +1836,15 @@ out:
        if (logit)
                ql_dbg(ql_dbg_io, vha, 0x3022,
                    "FCP command status: 0x%x-0x%x (0x%x) "
-                   "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+                   "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
+                   "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
                    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
-                   comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
-                   cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+                   comp_status, scsi_status, cp->result, vha->host_no,
+                   cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+                   fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+                   cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
+                   cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
+                   cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
                    resid_len, fw_resid_len);
 
        if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
        }
 }
 
+static int
+qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct srb_ctx *ctx;
+
+       if (!sp->ctx)
+               return 1;
+
+       ctx = sp->ctx;
+
+       if (ctx->type == SRB_LOGIN_CMD ||
+           ctx->type == SRB_LOGOUT_CMD ||
+           ctx->type == SRB_TM_CMD) {
+               ctx->u.iocb_cmd->done(sp);
+               return 0;
+       } else if (ctx->type == SRB_ADISC_CMD) {
+               ctx->u.iocb_cmd->free(sp);
+               return 0;
+       } else {
+               struct fc_bsg_job *bsg_job;
+
+               bsg_job = ctx->u.bsg_job;
+               if (ctx->type == SRB_ELS_CMD_HST ||
+                   ctx->type == SRB_CT_CMD)
+                       kfree(sp->fcport);
+
+               bsg_job->reply->reply_data.ctels_reply.status =
+                   FC_CTELS_STATUS_OK;
+               bsg_job->reply->result = DID_ERROR << 16;
+               bsg_job->reply->reply_payload_rcv_len = 0;
+               kfree(sp->ctx);
+               mempool_free(sp, ha->srb_mempool);
+               bsg_job->job_done(bsg_job);
+               return 0;
+       }
+       return 1;
+}
+
 /**
  * qla2x00_error_entry() - Process an error entry.
  * @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;
-       uint32_t handle = LSW(pkt->handle);
+       const char func[] = "ERROR-IOCB";
        uint16_t que = MSW(pkt->handle);
        struct req_que *req = ha->req_q_map[que];
 
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
                ql_dbg(ql_dbg_async, vha, 0x502f,
                    "UNKNOWN flag error.\n");
 
-       /* Validate handle. */
-       if (handle < MAX_OUTSTANDING_COMMANDS)
-               sp = req->outstanding_cmds[handle];
-       else
-               sp = NULL;
-
+       sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (sp) {
-               /* Free outstanding command slot. */
-               req->outstanding_cmds[handle] = NULL;
-
-               /* Bad payload or header */
-               if (pkt->entry_status &
-                   (RF_INV_E_ORDER | RF_INV_E_COUNT |
-                    RF_INV_E_PARAM | RF_INV_E_TYPE)) {
-                       sp->cmd->result = DID_ERROR << 16;
-               } else if (pkt->entry_status & RF_BUSY) {
-                       sp->cmd->result = DID_BUS_BUSY << 16;
-               } else {
-                       sp->cmd->result = DID_ERROR << 16;
+               if (qla2x00_free_sp_ctx(vha, sp)) {
+                       if (pkt->entry_status &
+                           (RF_INV_E_ORDER | RF_INV_E_COUNT |
+                            RF_INV_E_PARAM | RF_INV_E_TYPE)) {
+                               sp->cmd->result = DID_ERROR << 16;
+                       } else if (pkt->entry_status & RF_BUSY) {
+                               sp->cmd->result = DID_BUS_BUSY << 16;
+                       } else {
+                               sp->cmd->result = DID_ERROR << 16;
+                       }
+                       qla2x00_sp_compl(ha, sp);
                }
-               qla2x00_sp_compl(ha, sp);
-
        } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
                COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
                || pkt->entry_type == COMMAND_TYPE_6) {
@@ -1977,26 +2011,30 @@ static void
 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
        uint16_t        cnt;
+       uint32_t        mboxes;
        uint16_t __iomem *wptr;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
+       /* Read all mbox registers? */
+       mboxes = (1 << ha->mbx_count) - 1;
+       if (!ha->mcp)
+               ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERRROR.\n");
+       else
+               mboxes = ha->mcp->in_mb;
+
        /* Load return mailbox registers. */
        ha->flags.mbox_int = 1;
        ha->mailbox_out[0] = mb0;
+       mboxes >>= 1;
        wptr = (uint16_t __iomem *)&reg->mailbox1;
 
        for (cnt = 1; cnt < ha->mbx_count; cnt++) {
-               ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
-               wptr++;
-       }
+               if (mboxes & BIT_0)
+                       ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
 
-       if (ha->mcp) {
-               ql_dbg(ql_dbg_async, vha, 0x504d,
-                   "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
-       } else {
-               ql_dbg(ql_dbg_async, vha, 0x504e,
-                   "MBX pointer ERROR.\n");
+               mboxes >>= 1;
+               wptr++;
        }
 }
 
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                }
 
                if (pkt->entry_status != 0) {
-                       ql_dbg(ql_dbg_async, vha, 0x5029,
-                           "Process error entry.\n");
-
                        qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
index 82a33533ed26c25dc87e90cfe1a35f95c33ccb44..34344d3f865832ce963f1091105c4efc753074bf 100644 (file)
@@ -2887,7 +2887,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                if (vp_idx == 0 && (MSB(stat) != 1))
                        goto reg_needed;
 
-               if (MSB(stat) == 1) {
+               if (MSB(stat) != 0) {
                        ql_dbg(ql_dbg_mbx, vha, 0x10ba,
                            "Could not acquire ID for VP[%d].\n", vp_idx);
                        return;
index 1873940a7ccb876bfe93680f991bc77c8b025b33..1cd46cd7ff901de408050541ce8bf8aaeef24853 100644 (file)
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
                ql_dbg(ql_dbg_p3p, vha, 0xb000,
                    "%s: Written crbwin (0x%x) "
                    "!= Read crbwin (0x%x), off=0x%lx.\n",
-                   ha->crb_win, win_read, *off);
+                   __func__, ha->crb_win, win_read, *off);
        }
        *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
 }
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
        }
        /* strange address given */
        ql_dbg(ql_dbg_p3p, vha, 0xb001,
-           "%x: Warning: unm_nic_pci_set_crbwindow "
+           "%s: Warning: unm_nic_pci_set_crbwindow "
            "called with an unknown address(%llx).\n",
            QLA2XXX_DRIVER_NAME, off);
        return off;
@@ -1711,12 +1711,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
        ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
            "nx_pci_base=%p iobase=%p "
            "max_req_queues=%d msix_count=%d.\n",
-           ha->nx_pcibase, ha->iobase,
+           (void *)ha->nx_pcibase, ha->iobase,
            ha->max_req_queues, ha->msix_count);
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
            "nx_pci_base=%p iobase=%p "
            "max_req_queues=%d msix_count=%d.\n",
-           ha->nx_pcibase, ha->iobase,
+           (void *)ha->nx_pcibase, ha->iobase,
            ha->max_req_queues, ha->msix_count);
        return 0;
 
@@ -1744,7 +1744,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
        ret = pci_set_mwi(ha->pdev);
        ha->chip_revision = ha->pdev->revision;
        ql_dbg(ql_dbg_init, vha, 0x0043,
-           "Chip revision:%ld.\n",
+           "Chip revision:%d.\n",
            ha->chip_revision);
        return 0;
 }
@@ -2023,13 +2023,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
                wptr++;
        }
 
-       if (ha->mcp) {
-               ql_dbg(ql_dbg_async, vha, 0x5052,
-                   "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
-       } else {
+       if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x5053,
                    "MBX pointer ERROR.\n");
-       }
 }
 
 /*
@@ -2543,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
        return qla82xx_check_rcvpeg_state(ha);
 }
 
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
-       uint16_t tot_dsds)
-{
-       uint32_t *cur_dsd = NULL;
-       scsi_qla_host_t *vha;
-       struct qla_hw_data *ha;
-       struct scsi_cmnd *cmd;
-       struct  scatterlist *cur_seg;
-       uint32_t *dsd_seg;
-       void *next_dsd;
-       uint8_t avail_dsds;
-       uint8_t first_iocb = 1;
-       uint32_t dsd_list_len;
-       struct dsd_dma *dsd_ptr;
-       struct ct6_dsd *ctx;
-
-       cmd = sp->cmd;
-
-       /* Update entry type to indicate Command Type 3 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) =
-               __constant_cpu_to_le32(COMMAND_TYPE_6);
-
-       /* No data transfer */
-       if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-               cmd_pkt->byte_count = __constant_cpu_to_le32(0);
-               return 0;
-       }
-
-       vha = sp->fcport->vha;
-       ha = vha->hw;
-
-       /* Set transfer direction */
-       if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-               cmd_pkt->control_flags =
-                   __constant_cpu_to_le16(CF_WRITE_DATA);
-               ha->qla_stats.output_bytes += scsi_bufflen(cmd);
-       } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               cmd_pkt->control_flags =
-                   __constant_cpu_to_le16(CF_READ_DATA);
-               ha->qla_stats.input_bytes += scsi_bufflen(cmd);
-       }
-
-       cur_seg = scsi_sglist(cmd);
-       ctx = sp->ctx;
-
-       while (tot_dsds) {
-               avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
-                   QLA_DSDS_PER_IOCB : tot_dsds;
-               tot_dsds -= avail_dsds;
-               dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
-               dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
-                   struct dsd_dma, list);
-               next_dsd = dsd_ptr->dsd_addr;
-               list_del(&dsd_ptr->list);
-               ha->gbl_dsd_avail--;
-               list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
-               ctx->dsd_use_cnt++;
-               ha->gbl_dsd_inuse++;
-
-               if (first_iocb) {
-                       first_iocb = 0;
-                       dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-                       *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
-               } else {
-                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(dsd_list_len);
-               }
-               cur_dsd = (uint32_t *)next_dsd;
-               while (avail_dsds) {
-                       dma_addr_t      sle_dma;
-
-                       sle_dma = sg_dma_address(cur_seg);
-                       *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-                       cur_seg = sg_next(cur_seg);
-                       avail_dsds--;
-               }
-       }
-
-       /* Null termination */
-       *cur_dsd++ =  0;
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
-       cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
-       return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
-       uint16_t dsd_lists = 0;
-
-       dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
-       if (dsds % QLA_DSDS_PER_IOCB)
-               dsd_lists++;
-       return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
-       int             ret, nseg;
-       unsigned long   flags;
-       struct scsi_cmnd *cmd;
-       uint32_t        *clr_ptr;
-       uint32_t        index;
-       uint32_t        handle;
-       uint16_t        cnt;
-       uint16_t        req_cnt;
-       uint16_t        tot_dsds;
-       struct device_reg_82xx __iomem *reg;
-       uint32_t dbval;
-       uint32_t *fcp_dl;
-       uint8_t additional_cdb_len;
-       struct ct6_dsd *ctx;
-       struct scsi_qla_host *vha = sp->fcport->vha;
-       struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = NULL;
-       struct rsp_que *rsp = NULL;
-       char            tag[2];
-
-       /* Setup device pointers. */
-       ret = 0;
-       reg = &ha->iobase->isp82;
-       cmd = sp->cmd;
-       req = vha->req;
-       rsp = ha->rsp_q_map[0];
-
-       /* So we know we haven't pci_map'ed anything yet */
-       tot_dsds = 0;
-
-       dbval = 0x04 | (ha->portnum << 5);
-
-       /* Send marker if required */
-       if (vha->marker_needed != 0) {
-               if (qla2x00_marker(vha, req,
-                       rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
-                       ql_log(ql_log_warn, vha, 0x300c,
-                           "qla2x00_marker failed for cmd=%p.\n", cmd);
-                       return QLA_FUNCTION_FAILED;
-               }
-               vha->marker_needed = 0;
-       }
-
-       /* Acquire ring specific lock */
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-
-       /* Check for room in outstanding command list. */
-       handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
-               handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
-                       handle = 1;
-               if (!req->outstanding_cmds[handle])
-                       break;
-       }
-       if (index == MAX_OUTSTANDING_COMMANDS)
-               goto queuing_error;
-
-       /* Map the sg table so we have an accurate count of sg entries needed */
-       if (scsi_sg_count(cmd)) {
-               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
-                   scsi_sg_count(cmd), cmd->sc_data_direction);
-               if (unlikely(!nseg))
-                       goto queuing_error;
-       } else
-               nseg = 0;
-
-       tot_dsds = nseg;
-
-       if (tot_dsds > ql2xshiftctondsd) {
-               struct cmd_type_6 *cmd_pkt;
-               uint16_t more_dsd_lists = 0;
-               struct dsd_dma *dsd_ptr;
-               uint16_t i;
-
-               more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
-               if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
-                       ql_dbg(ql_dbg_io, vha, 0x300d,
-                           "Num of DSD list %d is than %d for cmd=%p.\n",
-                           more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
-                           cmd);
-                       goto queuing_error;
-               }
-
-               if (more_dsd_lists <= ha->gbl_dsd_avail)
-                       goto sufficient_dsds;
-               else
-                       more_dsd_lists -= ha->gbl_dsd_avail;
-
-               for (i = 0; i < more_dsd_lists; i++) {
-                       dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-                       if (!dsd_ptr) {
-                               ql_log(ql_log_fatal, vha, 0x300e,
-                                   "Failed to allocate memory for dsd_dma "
-                                   "for cmd=%p.\n", cmd);
-                               goto queuing_error;
-                       }
-
-                       dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
-                               GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
-                       if (!dsd_ptr->dsd_addr) {
-                               kfree(dsd_ptr);
-                               ql_log(ql_log_fatal, vha, 0x300f,
-                                   "Failed to allocate memory for dsd_addr "
-                                   "for cmd=%p.\n", cmd);
-                               goto queuing_error;
-                       }
-                       list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
-                       ha->gbl_dsd_avail++;
-               }
-
-sufficient_dsds:
-               req_cnt = 1;
-
-               if (req->cnt < (req_cnt + 2)) {
-                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-                               &reg->req_q_out[0]);
-                       if (req->ring_index < cnt)
-                               req->cnt = cnt - req->ring_index;
-                       else
-                               req->cnt = req->length -
-                                       (req->ring_index - cnt);
-               }
-
-               if (req->cnt < (req_cnt + 2))
-                       goto queuing_error;
-
-               ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
-               if (!sp->ctx) {
-                       ql_log(ql_log_fatal, vha, 0x3010,
-                           "Failed to allocate ctx for cmd=%p.\n", cmd);
-                       goto queuing_error;
-               }
-               memset(ctx, 0, sizeof(struct ct6_dsd));
-               ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
-                       GFP_ATOMIC, &ctx->fcp_cmnd_dma);
-               if (!ctx->fcp_cmnd) {
-                       ql_log(ql_log_fatal, vha, 0x3011,
-                           "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-                       goto queuing_error_fcp_cmnd;
-               }
-
-               /* Initialize the DSD list and dma handle */
-               INIT_LIST_HEAD(&ctx->dsd_list);
-               ctx->dsd_use_cnt = 0;
-
-               if (cmd->cmd_len > 16) {
-                       additional_cdb_len = cmd->cmd_len - 16;
-                       if ((cmd->cmd_len % 4) != 0) {
-                               /* SCSI command bigger than 16 bytes must be
-                                * multiple of 4
-                                */
-                               ql_log(ql_log_warn, vha, 0x3012,
-                                   "scsi cmd len %d not multiple of 4 "
-                                   "for cmd=%p.\n", cmd->cmd_len, cmd);
-                               goto queuing_error_fcp_cmnd;
-                       }
-                       ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
-               } else {
-                       additional_cdb_len = 0;
-                       ctx->fcp_cmnd_len = 12 + 16 + 4;
-               }
-
-               cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
-               cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-               /* Zero out remaining portion of packet. */
-               /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
-               clr_ptr = (uint32_t *)cmd_pkt + 2;
-               memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-               cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-               /* Set NPORT-ID and LUN number*/
-               cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-               cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-               cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-               cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-               /* Build IOCB segments */
-               if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
-                       goto queuing_error_fcp_cmnd;
-
-               int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-               host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
-               /* build FCP_CMND IU */
-               memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-               int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-               ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-               if (cmd->sc_data_direction == DMA_TO_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 1;
-               else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-                       ctx->fcp_cmnd->additional_cdb_len |= 2;
-
-               /*
-                * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-                */
-               if (scsi_populate_tag_msg(cmd, tag)) {
-                       switch (tag[0]) {
-                       case HEAD_OF_QUEUE_TAG:
-                               ctx->fcp_cmnd->task_attribute =
-                                   TSK_HEAD_OF_QUEUE;
-                               break;
-                       case ORDERED_QUEUE_TAG:
-                               ctx->fcp_cmnd->task_attribute =
-                                   TSK_ORDERED;
-                               break;
-                       }
-               }
-
-               memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
-               fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
-                   additional_cdb_len);
-               *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
-               cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-               cmd_pkt->fcp_cmnd_dseg_address[0] =
-                   cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-               cmd_pkt->fcp_cmnd_dseg_address[1] =
-                   cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
-               sp->flags |= SRB_FCP_CMND_DMA_VALID;
-               cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-               /* Set total data segment count. */
-               cmd_pkt->entry_count = (uint8_t)req_cnt;
-               /* Specify response queue number where
-                * completion should happen
-                */
-               cmd_pkt->entry_status = (uint8_t) rsp->id;
-       } else {
-               struct cmd_type_7 *cmd_pkt;
-               req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
-               if (req->cnt < (req_cnt + 2)) {
-                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-                           &reg->req_q_out[0]);
-                       if (req->ring_index < cnt)
-                               req->cnt = cnt - req->ring_index;
-                       else
-                               req->cnt = req->length -
-                                       (req->ring_index - cnt);
-               }
-               if (req->cnt < (req_cnt + 2))
-                       goto queuing_error;
-
-               cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-               cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-               /* Zero out remaining portion of packet. */
-               /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
-               clr_ptr = (uint32_t *)cmd_pkt + 2;
-               memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-               cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-               /* Set NPORT-ID and LUN number*/
-               cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-               cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-               cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-               cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-               int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-               host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
-                       sizeof(cmd_pkt->lun));
-
-               /*
-                * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-                */
-               if (scsi_populate_tag_msg(cmd, tag)) {
-                       switch (tag[0]) {
-                       case HEAD_OF_QUEUE_TAG:
-                               cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-                               break;
-                       case ORDERED_QUEUE_TAG:
-                               cmd_pkt->task = TSK_ORDERED;
-                               break;
-                       }
-               }
-
-               /* Load SCSI command packet. */
-               memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
-               host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
-               cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
-               /* Build IOCB segments */
-               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
-               /* Set total data segment count. */
-               cmd_pkt->entry_count = (uint8_t)req_cnt;
-               /* Specify response queue number where
-                * completion should happen.
-                */
-               cmd_pkt->entry_status = (uint8_t) rsp->id;
-
-       }
-       /* Build command packet. */
-       req->current_outstanding_cmd = handle;
-       req->outstanding_cmds[handle] = sp;
-       sp->handle = handle;
-       sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-       req->cnt -= req_cnt;
-       wmb();
-
-       /* Adjust ring index. */
-       req->ring_index++;
-       if (req->ring_index == req->length) {
-               req->ring_index = 0;
-               req->ring_ptr = req->ring;
-       } else
-               req->ring_ptr++;
-
-       sp->flags |= SRB_DMA_VALID;
-
-       /* Set chip new ring index. */
-       /* write, read and verify logic */
-       dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-       if (ql2xdbwr)
-               qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-       else {
-               WRT_REG_DWORD(
-                       (unsigned long __iomem *)ha->nxdb_wr_ptr,
-                       dbval);
-               wmb();
-               while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-                       WRT_REG_DWORD(
-                               (unsigned long __iomem *)ha->nxdb_wr_ptr,
-                               dbval);
-                       wmb();
-               }
-       }
-
-       /* Manage unprocessed RIO/ZIO commands in response queue. */
-       if (vha->flags.process_response_queue &&
-           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-               qla24xx_process_response_queue(vha, rsp);
-
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
-       dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
-       if (tot_dsds)
-               scsi_dma_unmap(cmd);
-
-       if (sp->ctx) {
-               mempool_free(sp->ctx, ha->ctx_mempool);
-               sp->ctx = NULL;
-       }
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       return QLA_FUNCTION_FAILED;
-}
-
 static uint32_t *
 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
        uint32_t length)
@@ -3272,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 }
 
 void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
 {
-       struct qla_hw_data *ha = sp->fcport->vha->hw;
+       struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct device_reg_82xx __iomem *reg;
        uint32_t dbval;
@@ -3659,11 +3177,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
                                qla82xx_md_free(vha);
                                /* ALlocate MiniDump resources */
                                qla82xx_md_prep(vha);
-                       } else
-                               ql_log(ql_log_info, vha, 0xb02e,
-                                   "Firmware dump available to retrieve\n",
-                                   vha->host_no);
-               }
+                       }
+               } else
+                       ql_log(ql_log_info, vha, 0xb02e,
+                           "Firmware dump available to retrieve\n");
        }
        return rval;
 }
@@ -3758,7 +3275,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
 
                switch (dev_state) {
                case QLA82XX_DEV_READY:
-                       qla82xx_check_md_needed(vha);
                        ha->flags.isp82xx_reset_owner = 0;
                        goto exit;
                case QLA82XX_DEV_COLD:
@@ -4067,7 +3583,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
                }
        }
        ql_dbg(ql_dbg_p3p, vha, 0xb027,
-           "%s status=%d.\n", status);
+              "%s: status=%d.\n", __func__, status);
 
        return status;
 }
index f9e5b85e84d83e6d147eebca647e2e986358cc0c..4ed1e4a96b954bf16562ff28dd306f6ab1f79ad2 100644 (file)
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
                "\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
                "\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
                "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+               "\t\t0x1e400000 - Preferred value for capturing essential "
+               "debug information (equivalent to old "
+               "ql2xextended_error_logging=1).\n"
                "\t\tDo LOGICAL OR of the value to enable more than one level");
 
 int ql2xshiftctondsd = 6;
@@ -199,7 +202,7 @@ int ql2xmdcapmask = 0x1F;
 module_param(ql2xmdcapmask, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdcapmask,
                "Set the Minidump driver capture mask level. "
-               "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+               "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
 int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
@@ -847,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        int wait = 0;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_taskm, vha, 0x8000,
-           "Entered %s for cmd=%p.\n", __func__, cmd);
        if (!CMD_SP(cmd))
                return SUCCESS;
 
        ret = fc_block_scsi_eh(cmd);
-       ql_dbg(ql_dbg_taskm, vha, 0x8001,
-           "Return value of fc_block_scsi_eh=%d.\n", ret);
        if (ret != 0)
                return ret;
        ret = SUCCESS;
@@ -870,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        }
 
        ql_dbg(ql_dbg_taskm, vha, 0x8002,
-           "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+           "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+           vha->host_no, id, lun, sp, cmd);
 
        /* Get a reference to the sp and drop the lock.*/
        sp_get(sp);
@@ -878,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (ha->isp_ops->abort_command(sp)) {
                ql_dbg(ql_dbg_taskm, vha, 0x8003,
-                   "Abort command mbx failed for cmd=%p.\n", cmd);
+                   "Abort command mbx failed cmd=%p.\n", cmd);
        } else {
                ql_dbg(ql_dbg_taskm, vha, 0x8004,
-                   "Abort command mbx success.\n");
+                   "Abort command mbx success cmd=%p.\n", cmd);
                wait = 1;
        }
 
@@ -897,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        if (wait) {
                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x8006,
-                           "Abort handler timed out for cmd=%p.\n", cmd);
+                           "Abort handler timed out cmd=%p.\n", cmd);
                        ret = FAILED;
                }
        }
 
        ql_log(ql_log_info, vha, 0x801c,
-           "Abort command issued --  %d %x.\n", wait, ret);
+           "Abort command issued nexus=%ld:%d:%d --  %d %x.\n",
+           vha->host_no, id, lun, wait, ret);
 
        return ret;
 }
@@ -972,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
        int err;
 
        if (!fcport) {
-               ql_log(ql_log_warn, vha, 0x8007,
-                   "fcport is NULL.\n");
                return FAILED;
        }
 
        err = fc_block_scsi_eh(cmd);
-       ql_dbg(ql_dbg_taskm, vha, 0x8008,
-           "fc_block_scsi_eh ret=%d.\n", err);
        if (err != 0)
                return err;
 
        ql_log(ql_log_info, vha, 0x8009,
-           "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+           "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
            cmd->device->id, cmd->device->lun, cmd);
 
        err = 0;
@@ -1009,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
        }
 
        ql_log(ql_log_info, vha, 0x800e,
-           "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
-           cmd->device->id, cmd->device->lun, cmd);
+           "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+           vha->host_no, cmd->device->id, cmd->device->lun, cmd);
 
        return SUCCESS;
 
 eh_reset_failed:
        ql_log(ql_log_info, vha, 0x800f,
-           "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
-           reset_errors[err], cmd->device->id, cmd->device->lun);
+           "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+           reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+           cmd);
        return FAILED;
 }
 
@@ -1068,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        lun = cmd->device->lun;
 
        if (!fcport) {
-               ql_log(ql_log_warn, vha, 0x8010,
-                   "fcport is NULL.\n");
                return ret;
        }
 
        ret = fc_block_scsi_eh(cmd);
-       ql_dbg(ql_dbg_taskm, vha, 0x8011,
-           "fc_block_scsi_eh ret=%d.\n", ret);
        if (ret != 0)
                return ret;
        ret = FAILED;
 
        ql_log(ql_log_info, vha, 0x8012,
-           "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+           "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
 
        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
                ql_log(ql_log_fatal, vha, 0x8013,
@@ -1105,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 
 eh_bus_reset_done:
        ql_log(ql_log_warn, vha, 0x802b,
-           "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+           "BUS RESET %s nexus=%ld:%d:%d.\n",
+           (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);
 
        return ret;
 }
@@ -1139,20 +1134,16 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        lun = cmd->device->lun;
 
        if (!fcport) {
-               ql_log(ql_log_warn, vha, 0x8016,
-                   "fcport is NULL.\n");
                return ret;
        }
 
        ret = fc_block_scsi_eh(cmd);
-       ql_dbg(ql_dbg_taskm, vha, 0x8017,
-           "fc_block_scsi_eh ret=%d.\n", ret);
        if (ret != 0)
                return ret;
        ret = FAILED;
 
        ql_log(ql_log_info, vha, 0x8018,
-           "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+           "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
                goto eh_host_reset_lock;
@@ -1193,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
                ret = SUCCESS;
 
 eh_host_reset_lock:
-       qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
-           (ret == FAILED) ? "failed" : "succeeded");
+       ql_log(ql_log_info, vha, 0x8017,
+           "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+           (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
        return ret;
 }
@@ -1344,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
                return;
 
        ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
-           "Queue depth adjusted-down "
-           "to %d for scsi(%ld:%d:%d:%d).\n",
-           sdev->queue_depth, fcport->vha->host_no,
-           sdev->channel, sdev->id, sdev->lun);
+           "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+           sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1369,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
 
        ql_dbg(ql_dbg_io, vha, 0x302a,
-           "Queue depth adjusted-up to %d for "
-           "scsi(%ld:%d:%d:%d).\n",
-           sdev->queue_depth, fcport->vha->host_no,
-           sdev->channel, sdev->id, sdev->lun);
+           "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+           sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static int
@@ -1496,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+       resource_size_t pio;
+       uint16_t msix;
+       int cpus;
+
+       if (IS_QLA82XX(ha))
+               return qla82xx_iospace_config(ha);
+
+       if (pci_request_selected_regions(ha->pdev, ha->bars,
+           QLA2XXX_DRIVER_NAME)) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+                   "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+       if (!(ha->bars & 1))
+               goto skip_pio;
+
+       /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+       pio = pci_resource_start(ha->pdev, 0);
+       if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+               if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+                       ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+                           "Invalid pci I/O region size (%s).\n",
+                           pci_name(ha->pdev));
+                       pio = 0;
+               }
+       } else {
+               ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+                   "Region #0 no a PIO resource (%s).\n",
+                   pci_name(ha->pdev));
+               pio = 0;
+       }
+       ha->pio_address = pio;
+       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+           "PIO address=%llu.\n",
+           (unsigned long long)ha->pio_address);
+
+skip_pio:
+       /* Use MMIO operations for all accesses. */
+       if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+                   "Region #1 not an MMIO resource (%s), aborting.\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+       if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+                   "Invalid PCI mem region size (%s), aborting.\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+       if (!ha->iobase) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+                   "Cannot remap MMIO (%s), aborting.\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       /* Determine queue resources */
+       ha->max_req_queues = ha->max_rsp_queues = 1;
+       if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+               (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+               goto mqiobase_exit;
+
+       ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+                       pci_resource_len(ha->pdev, 3));
+       if (ha->mqiobase) {
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+                   "MQIO Base=%p.\n", ha->mqiobase);
+               /* Read MSIX vector size of the board */
+               pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+               ha->msix_count = msix;
+               /* Max queues are bounded by available msix vectors */
+               /* queue 0 uses two msix vectors */
+               if (ql2xmultique_tag) {
+                       cpus = num_online_cpus();
+                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+                               (cpus + 1) : (ha->msix_count - 1);
+                       ha->max_req_queues = 2;
+               } else if (ql2xmaxqueues > 1) {
+                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+                           QLA_MQ_SIZE : ql2xmaxqueues;
+                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+                           "QoS mode set, max no of request queues:%d.\n",
+                           ha->max_req_queues);
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+                           "QoS mode set, max no of request queues:%d.\n",
+                           ha->max_req_queues);
+               }
+               ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+                   "MSI-X vector count: %d.\n", msix);
+       } else
+               ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+                   "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+       ha->msix_count = ha->max_rsp_queues + 1;
+       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+           "MSIX Count:%d.\n", ha->msix_count);
+       return (0);
+
+iospace_error_exit:
+       return (-ENOMEM);
+}
+
+
 static struct isp_operations qla2100_isp_ops = {
        .pci_config             = qla2100_pci_config,
        .reset_chip             = qla2x00_reset_chip,
@@ -1530,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
+       .iospace_config         = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1566,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
+       .iospace_config         = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1602,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
+       .iospace_config         = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1638,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
+       .iospace_config         = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -1674,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
+       .iospace_config         = qla2x00_iospace_config,
 };
 
 static struct isp_operations qla82xx_isp_ops = {
@@ -1710,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
        .abort_isp              = qla82xx_abort_isp,
+       .iospace_config         = qla82xx_iospace_config,
 };
 
 static inline void
@@ -1819,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
        else
                ha->flags.port0 = 0;
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
-           "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+           "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
            ha->device_type, ha->flags.port0, ha->fw_srisc_address);
 }
 
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
-       resource_size_t pio;
-       uint16_t msix;
-       int cpus;
-
-       if (IS_QLA82XX(ha))
-               return qla82xx_iospace_config(ha);
-
-       if (pci_request_selected_regions(ha->pdev, ha->bars,
-           QLA2XXX_DRIVER_NAME)) {
-               ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
-                   "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
-                   pci_name(ha->pdev));
-               goto iospace_error_exit;
-       }
-       if (!(ha->bars & 1))
-               goto skip_pio;
-
-       /* We only need PIO for Flash operations on ISP2312 v2 chips. */
-       pio = pci_resource_start(ha->pdev, 0);
-       if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
-               if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
-                       ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
-                           "Invalid pci I/O region size (%s).\n",
-                           pci_name(ha->pdev));
-                       pio = 0;
-               }
-       } else {
-               ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
-                   "Region #0 no a PIO resource (%s).\n",
-                   pci_name(ha->pdev));
-               pio = 0;
-       }
-       ha->pio_address = pio;
-       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
-           "PIO address=%p.\n",
-           ha->pio_address);
-
-skip_pio:
-       /* Use MMIO operations for all accesses. */
-       if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
-               ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
-                   "Region #1 not an MMIO resource (%s), aborting.\n",
-                   pci_name(ha->pdev));
-               goto iospace_error_exit;
-       }
-       if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
-               ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
-                   "Invalid PCI mem region size (%s), aborting.\n",
-                   pci_name(ha->pdev));
-               goto iospace_error_exit;
-       }
-
-       ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
-       if (!ha->iobase) {
-               ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
-                   "Cannot remap MMIO (%s), aborting.\n",
-                   pci_name(ha->pdev));
-               goto iospace_error_exit;
-       }
-
-       /* Determine queue resources */
-       ha->max_req_queues = ha->max_rsp_queues = 1;
-       if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-               (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
-               goto mqiobase_exit;
-
-       ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
-                       pci_resource_len(ha->pdev, 3));
-       if (ha->mqiobase) {
-               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
-                   "MQIO Base=%p.\n", ha->mqiobase);
-               /* Read MSIX vector size of the board */
-               pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
-               /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                           QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-               }
-               ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-                   "MSI-X vector count: %d.\n", msix);
-       } else
-               ql_log_pci(ql_log_info, ha->pdev, 0x001b,
-                   "BAR 3 not enabled.\n");
-
-mqiobase_exit:
-       ha->msix_count = ha->max_rsp_queues + 1;
-       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
-           "MSIX Count:%d.\n", ha->msix_count);
-       return (0);
-
-iospace_error_exit:
-       return (-ENOMEM);
-}
-
 static void
 qla2xxx_scan_start(struct Scsi_Host *shost)
 {
@@ -2032,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                pdev->needs_freset = 1;
        }
 
-       /* Configure PCI I/O space */
-       ret = qla2x00_iospace_config(ha);
-       if (ret)
-               goto probe_hw_failed;
-
-       ql_log_pci(ql_log_info, pdev, 0x001d,
-           "Found an ISP%04X irq %d iobase 0x%p.\n",
-           pdev->device, pdev->irq, ha->iobase);
        ha->prev_topology = 0;
        ha->init_cb_size = sizeof(init_cb_t);
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2152,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
            ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
            ha->nvram_conf_off, ha->nvram_data_off);
+
+       /* Configure PCI I/O space */
+       ret = ha->isp_ops->iospace_config(ha);
+       if (ret)
+               goto probe_hw_failed;
+
+       ql_log_pci(ql_log_info, pdev, 0x001d,
+           "Found an ISP%04X irq %d iobase 0x%p.\n",
+           pdev->device, pdev->irq, ha->iobase);
        mutex_init(&ha->vport_lock);
        init_completion(&ha->mbx_cmd_comp);
        complete(&ha->mbx_cmd_comp);
@@ -2227,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ql_dbg(ql_dbg_init, base_vha, 0x0033,
            "max_id=%d this_id=%d "
            "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
-           "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+           "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
            host->this_id, host->cmd_per_lun, host->unique_id,
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
@@ -2382,9 +2378,6 @@ skip_dpc:
 
        qla2x00_dfs_setup(base_vha);
 
-       ql_log(ql_log_info, base_vha, 0x00fa,
-           "QLogic Fibre Channed HBA Driver: %s.\n",
-           qla2x00_version_str);
        ql_log(ql_log_info, base_vha, 0x00fb,
            "QLogic %s - %s.\n",
            ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2833,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                if (!ha->sns_cmd)
                        goto fail_dma_pool;
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
-                   "sns_cmd.\n", ha->sns_cmd);
+                   "sns_cmd: %p.\n", ha->sns_cmd);
        } else {
        /* Get consistent memory allocated for MS IOCB */
                ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3460,27 +3453,21 @@ qla2x00_do_dpc(void *data)
                schedule();
                __set_current_state(TASK_RUNNING);
 
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-                   "DPC handler waking up.\n");
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-                   "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
-               /* Initialization not yet finished. Don't do anything yet. */
-               if (!base_vha->flags.init_done)
-                       continue;
+               if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+                       goto end_loop;
 
                if (ha->flags.eeh_busy) {
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
                            "eeh_busy=%d.\n", ha->flags.eeh_busy);
-                       continue;
+                       goto end_loop;
                }
 
                ha->dpc_active = 1;
 
-               if (ha->flags.mbox_busy) {
-                       ha->dpc_active = 0;
-                       continue;
-               }
+               ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+                   "DPC handler waking up.\n");
+               ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+                   "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
 
                qla2x00_do_work(base_vha);
 
@@ -3622,6 +3609,7 @@ qla2x00_do_dpc(void *data)
                qla2x00_do_dpc_all_vps(base_vha);
 
                ha->dpc_active = 0;
+end_loop:
                set_current_state(TASK_INTERRUPTIBLE);
        } /* End of while(1) */
        __set_current_state(TASK_RUNNING);
@@ -3705,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
        }
 
-       CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-       struct scsi_cmnd *cmd = sp->cmd;
-
-       qla2x00_sp_free_dma(sp);
-
        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
                struct ct6_dsd *ctx = sp->ctx;
                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3726,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
                sp->ctx = NULL;
        }
 
+       CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       qla2x00_sp_free_dma(sp);
        mempool_free(sp, ha->srb_mempool);
        cmd->scsi_done(cmd);
 }
index eff13563c82dba6061b51b5d6b7786b718ac181d..16bc72844a97b6afc6286a0618ed9476fd876193 100644 (file)
@@ -904,8 +904,9 @@ no_flash_data:
        }
 done:
        ql_dbg(ql_dbg_init, vha, 0x004d,
-           "FDT[%x]: (0x%x/0x%x) erase=0x%x "
-           "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+           "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+           "pr=%x wrtd=0x%x blk=0x%x.\n",
+           loc, mid, fid,
            ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
            ha->fdt_wrt_disable, ha->fdt_block_size);
 
index af62c3cf875293c08f53817febd08cd5961b24a4..8d58ae2748292b45026061f9c0e22c2bae31cdf7 100644 (file)
@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
        printk("------------------------------------------------------------"
               "--\n");
        for (cnt = 0; cnt < size; c++) {
-               printk(KERN_INFO "%02x", *c);
+               printk("%02x", *c);
                if (!(++cnt % 16))
-                       printk(KERN_INFO "\n");
+                       printk("\n");
 
                else
-                       printk(KERN_INFO "  ");
+                       printk("  ");
        }
        printk(KERN_INFO "\n");
 }
index fd5edc6e166dec140854e737912fec75a4eebf20..22a3ff02e48a419d0e9353d72e53f8d13a3e41f0 100644 (file)
 #define LOGIN_TOV                      12
 
 #define MAX_RESET_HA_RETRIES           2
+#define FW_ALIVE_WAIT_TOV              3
 
 #define CMD_SP(Cmnd)                   ((Cmnd)->SCp.ptr)
 
index 827e93078b942bbcbc393ac8f063c8ce4ab8b183..95828862eea095ac033f2c0aaa890d8813b77d72 100644 (file)
@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 
        srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
        if (!srb) {
-               DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
-                             "handle 0x%x, sp=%p. This cmd may have already "
-                             "been completed.\n", ha->host_no, __func__,
-                             le32_to_cpu(sts_entry->handle), srb));
-               ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
-                   " handle=0x%0x\n", __func__, sts_entry->handle);
-               set_bit(DPC_RESET_HA, &ha->dpc_flags);
+               ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+                          "handle=0x%0x, srb=%p\n", __func__,
+                          sts_entry->handle, srb);
+               if (is_qla8022(ha))
+                       set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+               else
+                       set_bit(DPC_RESET_HA, &ha->dpc_flags);
                return;
        }
 
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                case MBOX_ASTS_DHCP_LEASE_EXPIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
                                      "Reset HA\n", ha->host_no, mbox_status));
-                       set_bit(DPC_RESET_HA, &ha->dpc_flags);
+                       if (is_qla8022(ha))
+                               set_bit(DPC_RESET_HA_FW_CONTEXT,
+                                       &ha->dpc_flags);
+                       else
+                               set_bit(DPC_RESET_HA, &ha->dpc_flags);
                        break;
 
                case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                            (mbox_sts[2] == ACB_STATE_ACQUIRING)))
                                set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
                        else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
-                           (mbox_sts[2] == ACB_STATE_VALID))
-                               set_bit(DPC_RESET_HA, &ha->dpc_flags);
-                       else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+                                (mbox_sts[2] == ACB_STATE_VALID)) {
+                               if (is_qla8022(ha))
+                                       set_bit(DPC_RESET_HA_FW_CONTEXT,
+                                               &ha->dpc_flags);
+                               else
+                                       set_bit(DPC_RESET_HA, &ha->dpc_flags);
+                       } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
                                complete(&ha->disable_acb_comp);
                        break;
 
index f484ff43819928f5678ebff0187b7f503a609675..8d6bc1b2ff17266a45d66dfb923f5c7757cf1493 100644 (file)
@@ -1792,8 +1792,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        int rval = QLA_SUCCESS;
        unsigned long dev_init_timeout;
 
-       if (!test_bit(AF_INIT_DONE, &ha->flags))
+       if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+               qla4_8xxx_idc_lock(ha);
                qla4_8xxx_set_drv_active(ha);
+               qla4_8xxx_idc_unlock(ha);
+       }
 
        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1805,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        /* wait for 30 seconds for device to go ready */
        dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
 
+       qla4_8xxx_idc_lock(ha);
        while (1) {
-               qla4_8xxx_idc_lock(ha);
 
                if (time_after_eq(jiffies, dev_init_timeout)) {
                        ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1822,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
                /* NOTE: Make sure idc unlocked upon exit of switch statement */
                switch (dev_state) {
                case QLA82XX_DEV_READY:
-                       qla4_8xxx_idc_unlock(ha);
                        goto exit;
                case QLA82XX_DEV_COLD:
                        rval = qla4_8xxx_device_bootstrap(ha);
-                       qla4_8xxx_idc_unlock(ha);
                        goto exit;
                case QLA82XX_DEV_INITIALIZING:
                        qla4_8xxx_idc_unlock(ha);
                        msleep(1000);
+                       qla4_8xxx_idc_lock(ha);
                        break;
                case QLA82XX_DEV_NEED_RESET:
                        if (!ql4xdontresethba) {
@@ -1836,32 +1838,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
                                 * reset handler */
                                dev_init_timeout = jiffies +
                                        (ha->nx_dev_init_timeout * HZ);
+                       } else {
+                               qla4_8xxx_idc_unlock(ha);
+                               msleep(1000);
+                               qla4_8xxx_idc_lock(ha);
                        }
-                       qla4_8xxx_idc_unlock(ha);
                        break;
                case QLA82XX_DEV_NEED_QUIESCENT:
-                       qla4_8xxx_idc_unlock(ha);
                        /* idc locked/unlocked in handler */
                        qla4_8xxx_need_qsnt_handler(ha);
-                       qla4_8xxx_idc_lock(ha);
-                       /* fall thru needs idc_locked */
+                       break;
                case QLA82XX_DEV_QUIESCENT:
                        qla4_8xxx_idc_unlock(ha);
                        msleep(1000);
+                       qla4_8xxx_idc_lock(ha);
                        break;
                case QLA82XX_DEV_FAILED:
                        qla4_8xxx_idc_unlock(ha);
                        qla4xxx_dead_adapter_cleanup(ha);
                        rval = QLA_ERROR;
+                       qla4_8xxx_idc_lock(ha);
                        goto exit;
                default:
                        qla4_8xxx_idc_unlock(ha);
                        qla4xxx_dead_adapter_cleanup(ha);
                        rval = QLA_ERROR;
+                       qla4_8xxx_idc_lock(ha);
                        goto exit;
                }
        }
 exit:
+       qla4_8xxx_idc_unlock(ha);
        return rval;
 }
 
index 78bf700b365f202ed83a06558846f02f916f87e7..ec393a00c03816f52515fc7e294187a2527e2bf0 100644 (file)
@@ -935,7 +935,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
                goto exit_init_fw_cb;
        }
 
-       qla4xxx_disable_acb(ha);
+       rval = qla4xxx_disable_acb(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+                          __func__);
+               rval = -EIO;
+               goto exit_init_fw_cb;
+       }
+
+       wait_for_completion_timeout(&ha->disable_acb_comp,
+                                   DISABLE_ACB_TOV * HZ);
 
        qla4xxx_initcb_to_acb(init_fw_cb);
 
@@ -1966,9 +1975,10 @@ mem_alloc_error_exit:
  *
  * Context: Interrupt
  **/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 {
-       uint32_t fw_heartbeat_counter, halt_status;
+       uint32_t fw_heartbeat_counter;
+       int status = QLA_SUCCESS;
 
        fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
        /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1976,7 +1986,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
                    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
                    ha->host_no, __func__));
-               return;
+               return status;
        }
 
        if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1984,8 +1994,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                /* FW not alive after 2 seconds */
                if (ha->seconds_since_last_heartbeat == 2) {
                        ha->seconds_since_last_heartbeat = 0;
-                       halt_status = qla4_8xxx_rd_32(ha,
-                                                     QLA82XX_PEG_HALT_STATUS1);
 
                        ql4_printk(KERN_INFO, ha,
                                   "scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1993,7 +2001,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                                   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
                                   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
                                   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
-                                  ha->host_no, __func__, halt_status,
+                                  ha->host_no, __func__,
+                                  qla4_8xxx_rd_32(ha,
+                                                  QLA82XX_PEG_HALT_STATUS1),
                                   qla4_8xxx_rd_32(ha,
                                                   QLA82XX_PEG_HALT_STATUS2),
                                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -2006,24 +2016,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                                                   0x3c),
                                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
                                                   0x3c));
-
-                       /* Since we cannot change dev_state in interrupt
-                        * context, set appropriate DPC flag then wakeup
-                        * DPC */
-                       if (halt_status & HALT_STATUS_UNRECOVERABLE)
-                               set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
-                       else {
-                               printk("scsi%ld: %s: detect abort needed!\n",
-                                   ha->host_no, __func__);
-                               set_bit(DPC_RESET_HA, &ha->dpc_flags);
-                       }
-                       qla4xxx_wake_dpc(ha);
-                       qla4xxx_mailbox_premature_completion(ha);
+                       status = QLA_ERROR;
                }
        } else
                ha->seconds_since_last_heartbeat = 0;
 
        ha->fw_heartbeat_counter = fw_heartbeat_counter;
+       return status;
 }
 
 /**
@@ -2034,14 +2033,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
  **/
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 {
-       uint32_t dev_state;
-
-       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+       uint32_t dev_state, halt_status;
 
        /* don't poll if reset is going on */
        if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
                if (dev_state == QLA82XX_DEV_NEED_RESET &&
                    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                        if (!ql4xdontresethba) {
@@ -2049,7 +2047,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
                                    "NEED RESET!\n", __func__);
                                set_bit(DPC_RESET_HA, &ha->dpc_flags);
                                qla4xxx_wake_dpc(ha);
-                               qla4xxx_mailbox_premature_completion(ha);
                        }
                } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
                    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -2059,7 +2056,24 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
                        qla4xxx_wake_dpc(ha);
                } else  {
                        /* Check firmware health */
-                       qla4_8xxx_check_fw_alive(ha);
+                       if (qla4_8xxx_check_fw_alive(ha)) {
+                               halt_status = qla4_8xxx_rd_32(ha,
+                                               QLA82XX_PEG_HALT_STATUS1);
+
+                               /* Since we cannot change dev_state in interrupt
+                                * context, set appropriate DPC flag then wakeup
+                                * DPC */
+                               if (halt_status & HALT_STATUS_UNRECOVERABLE)
+                                       set_bit(DPC_HA_UNRECOVERABLE,
+                                               &ha->dpc_flags);
+                               else {
+                                       ql4_printk(KERN_INFO, ha, "%s: detect "
+                                                  "abort needed!\n", __func__);
+                                       set_bit(DPC_RESET_HA, &ha->dpc_flags);
+                               }
+                               qla4xxx_mailbox_premature_completion(ha);
+                               qla4xxx_wake_dpc(ha);
+                       }
                }
        }
 }
@@ -2414,6 +2428,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 {
        int status = QLA_ERROR;
        uint8_t reset_chip = 0;
+       uint32_t dev_state;
+       unsigned long wait;
 
        /* Stall incoming I/O until we are done */
        scsi_block_requests(ha->host);
@@ -2464,8 +2480,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
         * or if stop_firmware fails for ISP-82xx.
         * This is the default case for ISP-4xxx */
        if (!is_qla8022(ha) || reset_chip) {
+               if (!is_qla8022(ha))
+                       goto chip_reset;
+
+               /* Check if 82XX firmware is alive or not
+                * We may have arrived here from NEED_RESET
+                * detection only */
+               if (test_bit(AF_FW_RECOVERY, &ha->flags))
+                       goto chip_reset;
+
+               wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+               while (time_before(jiffies, wait)) {
+                       if (qla4_8xxx_check_fw_alive(ha)) {
+                               qla4xxx_mailbox_premature_completion(ha);
+                               break;
+                       }
+
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout(HZ);
+               }
+
                if (!test_bit(AF_FW_RECOVERY, &ha->flags))
                        qla4xxx_cmd_wait(ha);
+chip_reset:
                qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
                DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2501,6 +2538,25 @@ recover_ha_init_adapter:
                 * Since we don't want to block the DPC for too long
                 * with multiple resets in the same thread,
                 * utilize DPC to retry */
+               if (is_qla8022(ha)) {
+                       qla4_8xxx_idc_lock(ha);
+                       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+                       qla4_8xxx_idc_unlock(ha);
+                       if (dev_state == QLA82XX_DEV_FAILED) {
+                               ql4_printk(KERN_INFO, ha, "%s: don't retry "
+                                          "recover adapter. H/W is in Failed "
+                                          "state\n", __func__);
+                               qla4xxx_dead_adapter_cleanup(ha);
+                               clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+                               clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+                               clear_bit(DPC_RESET_HA_FW_CONTEXT,
+                                               &ha->dpc_flags);
+                               status = QLA_ERROR;
+
+                               goto exit_recover;
+                       }
+               }
+
                if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
                        ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
                        DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2539,6 +2595,7 @@ recover_ha_init_adapter:
                clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
        }
 
+exit_recover:
        ha->adapter_error_count++;
 
        if (test_bit(AF_ONLINE, &ha->flags))
@@ -2806,6 +2863,7 @@ dpc_post_reset_ha:
  **/
 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
 {
+       qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
 
        if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
                /* Turn-off interrupts on the card. */
@@ -4815,6 +4873,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
        return SUCCESS;
 }
 
+/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine finds that if reset host is called in EH
+ * scenario or from some application like sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+       if (shost->shost_state == SHOST_RECOVERY)
+               return 1;
+       return 0;
+}
+
 /**
  * qla4xxx_eh_host_reset - kernel callback
  * @cmd: Pointer to Linux's SCSI command structure
@@ -4832,6 +4904,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (ql4xdontresethba) {
                DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
                     ha->host_no, __func__));
+
+               /* Clear outstanding srb in queues */
+               if (qla4xxx_is_eh_active(cmd->device->host))
+                       qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
                return FAILED;
        }
 
index 5254e57968f5cf64a24c04731fe9ef2e240c4068..26a3fa34a33c0594c3dfd4fa78f73c75db0fb28c 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
index dc6131e6a1ba2d40fe32504f1cb0e0f8d8b41def..5f84a148eb142417d9e296191329a6126e89ef4a 100644 (file)
@@ -1812,7 +1812,7 @@ int scsi_error_handler(void *data)
                 * what we need to do to get it up and online again (if we can).
                 * If we fail, we end up taking the thing offline.
                 */
-               if (scsi_autopm_get_host(shost) != 0) {
+               if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
                        SCSI_LOG_ERROR_RECOVERY(1,
                                printk(KERN_ERR "Error handler scsi_eh_%d "
                                                "unable to autoresume\n",
@@ -1833,7 +1833,8 @@ int scsi_error_handler(void *data)
                 * which are still online.
                 */
                scsi_restart_operations(shost);
-               scsi_autopm_put_host(shost);
+               if (!shost->eh_noresume)
+                       scsi_autopm_put_host(shost);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
index d329f8b12e2b11503bd52b391bc198798aa1f297..bf8bf79e6a1f22fe2a9e84f4a2c2bf51abb9ed35 100644 (file)
@@ -49,8 +49,22 @@ static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
 {
        int err = 0;
 
-       if (scsi_is_sdev_device(dev))
+       if (scsi_is_sdev_device(dev)) {
+               /*
+                * sd is the only high-level SCSI driver to implement runtime
+                * PM, and sd treats runtime suspend, system suspend, and
+                * system hibernate identically (but not system freeze).
+                */
+               if (pm_runtime_suspended(dev)) {
+                       if (msg.event == PM_EVENT_SUSPEND ||
+                           msg.event == PM_EVENT_HIBERNATE)
+                               return 0;       /* already suspended */
+
+                       /* wake up device so that FREEZE will succeed */
+                       pm_runtime_resume(dev);
+               }
                err = scsi_dev_type_suspend(dev, msg);
+       }
        return err;
 }
 
@@ -58,8 +72,17 @@ static int scsi_bus_resume_common(struct device *dev)
 {
        int err = 0;
 
-       if (scsi_is_sdev_device(dev))
+       if (scsi_is_sdev_device(dev)) {
+               /*
+                * Parent device may have runtime suspended as soon as
+                * it is woken up during the system resume.
+                *
+                * Resume it on behalf of child.
+                */
+               pm_runtime_get_sync(dev->parent);
                err = scsi_dev_type_resume(dev);
+               pm_runtime_put_sync(dev->parent);
+       }
 
        if (err == 0) {
                pm_runtime_disable(dev);
index 2a588955423a58f971d91472171830137adda6ce..68eadd1c67fd290d0941d357a1cd857389c0e98c 100644 (file)
@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 enum {
        SCSI_DEVINFO_GLOBAL = 0,
        SCSI_DEVINFO_SPI,
-       SCSI_DEVINFO_DH,
 };
 
 extern int scsi_get_device_flags(struct scsi_device *sdev,
index e8447fbc31f30a45b3e65d7e08cc20c23f16b58c..cfd491437239750937b7a3d16f5235f6503f158b 100644 (file)
@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
                return NULL;
 
        session->transport = transport;
+       session->creator = -1;
        session->recovery_tmo = 120;
        session->state = ISCSI_SESSION_FREE;
        INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
-                       struct iscsi_uevent *ev, uint32_t initial_cmdsn,
-                       uint16_t cmds_max, uint16_t queue_depth)
+                       struct iscsi_uevent *ev, pid_t pid,
+                       uint32_t initial_cmdsn, uint16_t cmds_max,
+                       uint16_t queue_depth)
 {
        struct iscsi_transport *transport = priv->iscsi_transport;
        struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
        if (!session)
                return -ENOMEM;
 
+       session->creator = pid;
        shost = iscsi_session_to_shost(session);
        ev->r.c_session_ret.host_no = shost->host_no;
        ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
                err = iscsi_if_create_session(priv, ep, ev,
+                                             NETLINK_CREDS(skb)->pid,
                                              ev->u.c_session.initial_cmdsn,
                                              ev->u.c_session.cmds_max,
                                              ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                }
 
                err = iscsi_if_create_session(priv, ep, ev,
+                                       NETLINK_CREDS(skb)->pid,
                                        ev->u.c_bound_session.initial_cmdsn,
                                        ev->u.c_bound_session.cmds_max,
                                        ev->u.c_bound_session.queue_depth);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
 }
 static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
                        NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+       return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+                       NULL);
 
 #define iscsi_priv_session_attr_show(field, format)                    \
 static ssize_t                                                         \
@@ -2367,6 +2381,7 @@ static struct attribute *iscsi_session_attrs[] = {
        &dev_attr_sess_targetalias.attr,
        &dev_attr_priv_sess_recovery_tmo.attr,
        &dev_attr_priv_sess_state.attr,
+       &dev_attr_priv_sess_creator.attr,
        NULL,
 };
 
@@ -2424,6 +2439,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
                return S_IRUGO | S_IWUSR;
        else if (attr == &dev_attr_priv_sess_state.attr)
                return S_IRUGO;
+       else if (attr == &dev_attr_priv_sess_creator.attr)
+               return S_IRUGO;
        else {
                WARN_ONCE(1, "Invalid session attr");
                return 0;
index fa3a5918009cfd9e9223a650a6c487585af36fa4..7b3f8075e2a5d9f53e8f4d322ff5756fd496b51b 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/string_helpers.h>
 #include <linux/async.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
@@ -2741,6 +2742,9 @@ static void sd_shutdown(struct device *dev)
        if (!sdkp)
                return;         /* this can happen */
 
+       if (pm_runtime_suspended(dev))
+               goto exit;
+
        if (sdkp->WCE) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
                sd_sync_cache(sdkp);
@@ -2751,6 +2755,7 @@ static void sd_shutdown(struct device *dev)
                sd_start_stop_device(sdkp, 0);
        }
 
+exit:
        scsi_disk_put(sdkp);
 }
 
index 114b99a1ce1914d8e5b906d7e070986350e974ae..b8f78ebbb145f0957663549a4713675d1a7be18f 100644 (file)
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
                return;
 
        offset = psbfb->gtt->offset;
-       stride = fb->pitch;
+       stride = fb->pitches[0];
 
        switch (fb->depth) {
        case 8:
index 7b97c600eff0c49885b742f14b9f11cf80bdbefd..c63a32776a9ec09436fd632b92eed8ba4ace4b5c 100644 (file)
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
        if (ret < 0)
                goto psb_intel_pipe_set_base_exit;
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 3f39a37456fca0a04cf7de813e4b8acfb7f823e8..b00761cba1448a4f78360c625349a08ed6e36c46 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
@@ -273,14 +274,17 @@ static struct fb_ops psbfb_unaccel_ops = {
  */
 static int psb_framebuffer_init(struct drm_device *dev,
                                        struct psb_framebuffer *fb,
-                                       struct drm_mode_fb_cmd *mode_cmd,
+                                       struct drm_mode_fb_cmd2 *mode_cmd,
                                        struct gtt_range *gt)
 {
+       u32 bpp, depth;
        int ret;
 
-       if (mode_cmd->pitch & 63)
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+       if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
-       switch (mode_cmd->bpp) {
+       switch (bpp) {
        case 8:
        case 16:
        case 24:
@@ -313,7 +317,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
 
 static struct drm_framebuffer *psb_framebuffer_create
                        (struct drm_device *dev,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct gtt_range *gt)
 {
        struct psb_framebuffer *fb;
@@ -387,27 +391,28 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct psb_framebuffer *psbfb = &fbdev->pfb;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct device *device = &dev->pdev->dev;
        int size;
        int ret;
        struct gtt_range *backing;
        int gtt_roll = 1;
+       u32 bpp, depth;
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
+       bpp = sizes->surface_bpp;
 
        /* No 24bit packed */
-       if (mode_cmd.bpp == 24)
-               mode_cmd.bpp = 32;
+       if (bpp == 24)
+               bpp = 32;
 
        /* Acceleration via the GTT requires pitch to be 4096 byte aligned 
           (ie 1024 or 2048 pixels in normal use) */
-       mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
+       depth = sizes->surface_depth;
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
 
        /* Allocate the framebuffer in the GTT with stolen page backing */
@@ -421,10 +426,10 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
                gtt_roll = 0;   /* Don't use GTT accelerated scrolling */
 
-               mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
-               mode_cmd.depth = sizes->surface_depth;
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+               depth = sizes->surface_depth;
 
-               size = mode_cmd.pitch * mode_cmd.height;
+               size = mode_cmd.pitches[0] * mode_cmd.height;
                size = ALIGN(size, PAGE_SIZE);
 
                /* Allocate the framebuffer in the GTT with stolen page
@@ -443,6 +448,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        }
        info->par = fbdev;
 
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
        ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
        if (ret)
                goto out_unref;
@@ -504,7 +511,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
                info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
        }
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
                                sizes->fb_width, sizes->fb_height);
 
@@ -546,7 +553,7 @@ out_err1:
  */
 static struct drm_framebuffer *psb_user_framebuffer_create
                        (struct drm_device *dev, struct drm_file *filp,
-                        struct drm_mode_fb_cmd *cmd)
+                        struct drm_mode_fb_cmd2 *cmd)
 {
        struct gtt_range *r;
        struct drm_gem_object *obj;
@@ -555,7 +562,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
         *      Find the GEM object and thus the gtt range object that is
         *      to back this space
         */
-       obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+       obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
        if (obj == NULL)
                return ERR_PTR(-ENOENT);
 
index 8eb827ecc3d35da5ce37bbfbb261f0bb9a98e021..0b37b7b6b02afadb29bfe629635d9fea87f21626 100644 (file)
@@ -390,9 +390,9 @@ int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_f
                goto psb_intel_pipe_set_base_exit;
 
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
index c9311a573c2838ecc947c58286a6c7fb366f3e79..980837e37d80e23cd27c3f9190978053f499acf8 100644 (file)
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
                return 0;
 
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 986a04d16ba8641f2aaf4c265e3bb54087206186..95816808f86761c85ba9fb5ac05b0e123866f195 100644 (file)
@@ -1151,6 +1151,17 @@ static struct vm_operations_struct psb_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static const struct file_operations gma500_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = psb_unlocked_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
                           DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
@@ -1179,17 +1190,7 @@ static struct drm_driver driver = {
        .dumb_create = psb_gem_dumb_create,
        .dumb_map_offset = psb_gem_dumb_map_gtt,
        .dumb_destroy = psb_gem_dumb_destroy,
-
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = psb_unlocked_ioctl,
-                .mmap = drm_gem_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-        },
+       .fops = &gma500_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = PSB_DRM_DRIVER_DATE,
index caa9d86f26d82f0db7d665b8e9bcb6fdf30bb176..85659613ae624e38d58e303c6fa5a36a80fcda25 100644 (file)
@@ -367,9 +367,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
                goto psb_intel_pipe_set_base_exit;
        start = psbfb->gtt->offset;
 
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 4af56fbc3c0691ccc243f05de359f838a9f8528f..12d03e7ad6366fea465687fb321d8384119f1377 100644 (file)
@@ -31,6 +31,8 @@ static const struct usb_device_id id_table[] = {
                        .driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
        { USB_DEVICE(0x1d34, 0x0004),
                        .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+       { USB_DEVICE(0x1d34, 0x000a),
+                       .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 1105fa1ed7f4a47faf5e26e0796588d4dcaef585..a1376dc73d71cd79492c388afc340d7fba229545 100644 (file)
@@ -270,17 +270,7 @@ static struct platform_driver pm860x_backlight_driver = {
        .remove         = pm860x_backlight_remove,
 };
 
-static int __init pm860x_backlight_init(void)
-{
-       return platform_driver_register(&pm860x_backlight_driver);
-}
-module_init(pm860x_backlight_init);
-
-static void __exit pm860x_backlight_exit(void)
-{
-       platform_driver_unregister(&pm860x_backlight_driver);
-}
-module_exit(pm860x_backlight_exit);
+module_platform_driver(pm860x_backlight_driver);
 
 MODULE_DESCRIPTION("Backlight Driver for Marvell Semiconductor 88PM8606");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 278aeaa925059673fdcefa7b1c243f5a92b41837..681b36929fe406642756ca6bcd8614422a16aa0c 100644 (file)
@@ -280,14 +280,6 @@ config BACKLIGHT_WM831X
          If you have a backlight driven by the ISINK and DCDC of a
          WM831x PMIC say y to enable the backlight driver for it.
 
-config BACKLIGHT_ADX
-       tristate "Avionic Design Xanthos Backlight Driver"
-       depends on ARCH_PXA_ADX
-       default y
-       help
-         Say Y to enable the backlight driver on Avionic Design Xanthos-based
-         boards.
-
 config BACKLIGHT_ADP5520
        tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
        depends on PMIC_ADP5520
index fdd1fc4b277062333846f4f9cd3ba75855d394a7..af5cf654ec7c5c4dba3a632f0d4fc2867a681112 100644 (file)
@@ -32,7 +32,6 @@ obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
 obj-$(CONFIG_BACKLIGHT_TOSA)   += tosa_bl.o
 obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
 obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADX)    += adx_bl.o
 obj-$(CONFIG_BACKLIGHT_ADP5520)        += adp5520_bl.o
 obj-$(CONFIG_BACKLIGHT_ADP8860)        += adp8860_bl.o
 obj-$(CONFIG_BACKLIGHT_ADP8870)        += adp8870_bl.o
index dfb763e9147ff923676e9ff2778116e3677f61ea..2e630bf1164cab84c771da9accb52109b6b69ff7 100644 (file)
@@ -384,17 +384,7 @@ static struct platform_driver adp5520_bl_driver = {
        .resume         = adp5520_bl_resume,
 };
 
-static int __init adp5520_bl_init(void)
-{
-       return platform_driver_register(&adp5520_bl_driver);
-}
-module_init(adp5520_bl_init);
-
-static void __exit adp5520_bl_exit(void)
-{
-       platform_driver_unregister(&adp5520_bl_driver);
-}
-module_exit(adp5520_bl_exit);
+module_platform_driver(adp5520_bl_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
deleted file mode 100644 (file)
index c861c41..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * linux/drivers/video/backlight/adx.c
- *
- * Copyright (C) 2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Written by Thierry Reding <thierry.reding@avionic-design.de>
- */
-
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-/* register definitions */
-#define ADX_BACKLIGHT_CONTROL          0x00
-#define ADX_BACKLIGHT_CONTROL_ENABLE   (1 << 0)
-#define ADX_BACKLIGHT_BRIGHTNESS       0x08
-#define ADX_BACKLIGHT_STATUS           0x10
-#define ADX_BACKLIGHT_ERROR            0x18
-
-struct adxbl {
-       void __iomem *base;
-};
-
-static int adx_backlight_update_status(struct backlight_device *bldev)
-{
-       struct adxbl *bl = bl_get_data(bldev);
-       u32 value;
-
-       value = bldev->props.brightness;
-       writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
-
-       value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
-
-       if (bldev->props.state & BL_CORE_FBBLANK)
-               value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
-       else
-               value |= ADX_BACKLIGHT_CONTROL_ENABLE;
-
-       writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
-
-       return 0;
-}
-
-static int adx_backlight_get_brightness(struct backlight_device *bldev)
-{
-       struct adxbl *bl = bl_get_data(bldev);
-       u32 brightness;
-
-       brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
-       return brightness & 0xff;
-}
-
-static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
-{
-       return 1;
-}
-
-static const struct backlight_ops adx_backlight_ops = {
-       .options = 0,
-       .update_status = adx_backlight_update_status,
-       .get_brightness = adx_backlight_get_brightness,
-       .check_fb = adx_backlight_check_fb,
-};
-
-static int __devinit adx_backlight_probe(struct platform_device *pdev)
-{
-       struct backlight_properties props;
-       struct backlight_device *bldev;
-       struct resource *res;
-       struct adxbl *bl;
-       int ret = 0;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENXIO;
-               goto out;
-       }
-
-       res = devm_request_mem_region(&pdev->dev, res->start,
-                       resource_size(res), res->name);
-       if (!res) {
-               ret = -ENXIO;
-               goto out;
-       }
-
-       bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
-       if (!bl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
-                       resource_size(res));
-       if (!bl->base) {
-               ret = -ENXIO;
-               goto out;
-       }
-
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.type = BACKLIGHT_RAW;
-       props.max_brightness = 0xff;
-       bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
-                                         bl, &adx_backlight_ops, &props);
-       if (IS_ERR(bldev)) {
-               ret = PTR_ERR(bldev);
-               goto out;
-       }
-
-       bldev->props.brightness = 0xff;
-       bldev->props.power = FB_BLANK_UNBLANK;
-
-       platform_set_drvdata(pdev, bldev);
-
-out:
-       return ret;
-}
-
-static int __devexit adx_backlight_remove(struct platform_device *pdev)
-{
-       struct backlight_device *bldev;
-       int ret = 0;
-
-       bldev = platform_get_drvdata(pdev);
-       bldev->props.power = FB_BLANK_UNBLANK;
-       bldev->props.brightness = 0xff;
-       backlight_update_status(bldev);
-       backlight_device_unregister(bldev);
-       platform_set_drvdata(pdev, NULL);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int adx_backlight_suspend(struct platform_device *pdev,
-               pm_message_t state)
-{
-       return 0;
-}
-
-static int adx_backlight_resume(struct platform_device *pdev)
-{
-       return 0;
-}
-#else
-#define adx_backlight_suspend NULL
-#define adx_backlight_resume NULL
-#endif
-
-static struct platform_driver adx_backlight_driver = {
-       .probe = adx_backlight_probe,
-       .remove = __devexit_p(adx_backlight_remove),
-       .suspend = adx_backlight_suspend,
-       .resume = adx_backlight_resume,
-       .driver = {
-               .name = "adx-backlight",
-               .owner = THIS_MODULE,
-       },
-};
-
-static int __init adx_backlight_init(void)
-{
-       return platform_driver_register(&adx_backlight_driver);
-}
-
-static void __exit adx_backlight_exit(void)
-{
-       platform_driver_unregister(&adx_backlight_driver);
-}
-
-module_init(adx_backlight_init);
-module_exit(adx_backlight_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
-MODULE_LICENSE("GPL v2");
index 7363c1b169e8f5719e5ba9bc1dee3542de468169..bf5b1ece71605d701516f599af7ef1ce18751a11 100644 (file)
@@ -102,7 +102,7 @@ static void backlight_generate_event(struct backlight_device *bd,
 }
 
 static ssize_t backlight_show_power(struct device *dev,
-               struct device_attribute *attr,char *buf)
+               struct device_attribute *attr, char *buf)
 {
        struct backlight_device *bd = to_backlight_device(dev);
 
@@ -116,7 +116,7 @@ static ssize_t backlight_store_power(struct device *dev,
        struct backlight_device *bd = to_backlight_device(dev);
        unsigned long power;
 
-       rc = strict_strtoul(buf, 0, &power);
+       rc = kstrtoul(buf, 0, &power);
        if (rc)
                return rc;
 
@@ -150,7 +150,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
        struct backlight_device *bd = to_backlight_device(dev);
        unsigned long brightness;
 
-       rc = strict_strtoul(buf, 0, &brightness);
+       rc = kstrtoul(buf, 0, &brightness);
        if (rc)
                return rc;
 
index d68f14bbb687d8391b60f72f8949c3d2c396fbcf..abb4a06268f1831e57815475338447fb83822543 100644 (file)
@@ -199,17 +199,7 @@ static struct platform_driver da903x_backlight_driver = {
        .remove         = da903x_backlight_remove,
 };
 
-static int __init da903x_backlight_init(void)
-{
-       return platform_driver_register(&da903x_backlight_driver);
-}
-module_init(da903x_backlight_init);
-
-static void __exit da903x_backlight_exit(void)
-{
-       platform_driver_unregister(&da903x_backlight_driver);
-}
-module_exit(da903x_backlight_exit);
+module_platform_driver(da903x_backlight_driver);
 
 MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
 MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
index c74a6f4baa127a313ea8eea5bf16adde62856b2a..b62b8b9063b53beeacffa86c8259d971372e3ec9 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/fb.h>
 #include <linux/backlight.h>
@@ -144,17 +143,7 @@ static struct platform_driver ep93xxbl_driver = {
        .resume         = ep93xxbl_resume,
 };
 
-static int __init ep93xxbl_init(void)
-{
-       return platform_driver_register(&ep93xxbl_driver);
-}
-module_init(ep93xxbl_init);
-
-static void __exit ep93xxbl_exit(void)
-{
-       platform_driver_unregister(&ep93xxbl_driver);
-}
-module_exit(ep93xxbl_exit);
+module_platform_driver(ep93xxbl_driver);
 
 MODULE_DESCRIPTION("EP93xx Backlight Driver");
 MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
index adb191466d646355fdfcb5193c80b864631c3919..9ce6170c186079414dcb62123100e7786b032721 100644 (file)
@@ -132,18 +132,7 @@ static struct platform_driver genericbl_driver = {
        },
 };
 
-static int __init genericbl_init(void)
-{
-       return platform_driver_register(&genericbl_driver);
-}
-
-static void __exit genericbl_exit(void)
-{
-       platform_driver_unregister(&genericbl_driver);
-}
-
-module_init(genericbl_init);
-module_exit(genericbl_exit);
+module_platform_driver(genericbl_driver);
 
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
 MODULE_DESCRIPTION("Generic Backlight Driver");
index de65d80159beed3b90b21313070538b42c3ee4aa..2f8af5d786abbb5971aed6cad8c153229a4b4b84 100644 (file)
@@ -147,19 +147,8 @@ static struct platform_driver jornada_bl_driver = {
        },
 };
 
-static int __init jornada_bl_init(void)
-{
-       return platform_driver_register(&jornada_bl_driver);
-}
-
-static void __exit jornada_bl_exit(void)
-{
-       platform_driver_unregister(&jornada_bl_driver);
-}
+module_platform_driver(jornada_bl_driver);
 
 MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson>");
 MODULE_DESCRIPTION("HP Jornada 710/720/728 Backlight driver");
 MODULE_LICENSE("GPL");
-
-module_init(jornada_bl_init);
-module_exit(jornada_bl_exit);
index d2ff658b4144f91cf6d10a6e04f5178822db26f8..22d231a17e3c4649652ec1992567111e5b70cd6c 100644 (file)
@@ -135,19 +135,8 @@ static struct platform_driver jornada_lcd_driver = {
        },
 };
 
-static int __init jornada_lcd_init(void)
-{
-       return platform_driver_register(&jornada_lcd_driver);
-}
-
-static void __exit jornada_lcd_exit(void)
-{
-       platform_driver_unregister(&jornada_lcd_driver);
-}
+module_platform_driver(jornada_lcd_driver);
 
 MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
 MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
 MODULE_LICENSE("GPL");
-
-module_init(jornada_lcd_init);
-module_exit(jornada_lcd_exit);
index 71a11cadffc48134b3ab164cb798f7278be6f802..79c1b0d609a809e189f43515ee20f648a7d2e8fa 100644 (file)
@@ -97,19 +97,16 @@ static ssize_t lcd_store_power(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        int rc = -ENXIO;
-       char *endp;
        struct lcd_device *ld = to_lcd_device(dev);
-       int power = simple_strtoul(buf, &endp, 0);
-       size_t size = endp - buf;
+       unsigned long power;
 
-       if (isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
+       rc = kstrtoul(buf, 0, &power);
+       if (rc)
+               return rc;
 
        mutex_lock(&ld->ops_lock);
        if (ld->ops && ld->ops->set_power) {
-               pr_debug("lcd: set power to %d\n", power);
+               pr_debug("lcd: set power to %lu\n", power);
                ld->ops->set_power(ld, power);
                rc = count;
        }
@@ -136,19 +133,16 @@ static ssize_t lcd_store_contrast(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        int rc = -ENXIO;
-       char *endp;
        struct lcd_device *ld = to_lcd_device(dev);
-       int contrast = simple_strtoul(buf, &endp, 0);
-       size_t size = endp - buf;
+       unsigned long contrast;
 
-       if (isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
+       rc = kstrtoul(buf, 0, &contrast);
+       if (rc)
+               return rc;
 
        mutex_lock(&ld->ops_lock);
        if (ld->ops && ld->ops->set_contrast) {
-               pr_debug("lcd: set contrast to %d\n", contrast);
+               pr_debug("lcd: set contrast to %lu\n", contrast);
                ld->ops->set_contrast(ld, contrast);
                rc = count;
        }
index da9a5ce0ccb8463dcb85b18c708738b483383352..78dafc0c8fc5a1ec4830d04660d5914441e4fe09 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/lcd.h>
 #include <linux/backlight.h>
 #include <linux/module.h>
+#include <linux/regulator/consumer.h>
 
 #include "ld9040_gamma.h"
 
@@ -53,8 +54,51 @@ struct ld9040 {
        struct lcd_device               *ld;
        struct backlight_device         *bd;
        struct lcd_platform_data        *lcd_pd;
+
+       struct mutex                    lock;
+       bool  enabled;
+};
+
+static struct regulator_bulk_data supplies[] = {
+       { .supply = "vdd3", },
+       { .supply = "vci", },
 };
 
+static void ld9040_regulator_enable(struct ld9040 *lcd)
+{
+       int ret = 0;
+       struct lcd_platform_data *pd = NULL;
+
+       pd = lcd->lcd_pd;
+       mutex_lock(&lcd->lock);
+       if (!lcd->enabled) {
+               ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+               if (ret)
+                       goto out;
+
+               lcd->enabled = true;
+       }
+       mdelay(pd->power_on_delay);
+out:
+       mutex_unlock(&lcd->lock);
+}
+
+static void ld9040_regulator_disable(struct ld9040 *lcd)
+{
+       int ret = 0;
+
+       mutex_lock(&lcd->lock);
+       if (lcd->enabled) {
+               ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+               if (ret)
+                       goto out;
+
+               lcd->enabled = false;
+       }
+out:
+       mutex_unlock(&lcd->lock);
+}
+
 static const unsigned short seq_swreset[] = {
        0x01, COMMAND_ONLY,
        ENDDEF, 0x00
@@ -532,13 +576,8 @@ static int ld9040_power_on(struct ld9040 *lcd)
                return -EFAULT;
        }
 
-       if (!pd->power_on) {
-               dev_err(lcd->dev, "power_on is NULL.\n");
-               return -EFAULT;
-       } else {
-               pd->power_on(lcd->ld, 1);
-               mdelay(pd->power_on_delay);
-       }
+       /* lcd power on */
+       ld9040_regulator_enable(lcd);
 
        if (!pd->reset) {
                dev_err(lcd->dev, "reset is NULL.\n");
@@ -582,11 +621,8 @@ static int ld9040_power_off(struct ld9040 *lcd)
 
        mdelay(pd->power_off_delay);
 
-       if (!pd->power_on) {
-               dev_err(lcd->dev, "power_on is NULL.\n");
-               return -EFAULT;
-       } else
-               pd->power_on(lcd->ld, 0);
+       /* lcd power off */
+       ld9040_regulator_disable(lcd);
 
        return 0;
 }
@@ -693,6 +729,14 @@ static int ld9040_probe(struct spi_device *spi)
                goto out_free_lcd;
        }
 
+       mutex_init(&lcd->lock);
+
+       ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+       if (ret) {
+               dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
+               goto out_free_lcd;
+       }
+
        ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
        if (IS_ERR(ld)) {
                ret = PTR_ERR(ld);
@@ -739,6 +783,8 @@ static int ld9040_probe(struct spi_device *spi)
 out_unregister_lcd:
        lcd_device_unregister(lcd->ld);
 out_free_lcd:
+       regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+
        kfree(lcd);
        return ret;
 }
@@ -750,6 +796,7 @@ static int __devexit ld9040_remove(struct spi_device *spi)
        ld9040_power(lcd, FB_BLANK_POWERDOWN);
        backlight_device_unregister(lcd->bd);
        lcd_device_unregister(lcd->ld);
+       regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
        kfree(lcd);
 
        return 0;
index 7bbc802560ea0473229e74ce89b2da299e140fce..c915e3b5388698b2c8c8007167ed531ad5e38580 100644 (file)
@@ -188,17 +188,7 @@ static struct platform_driver max8925_backlight_driver = {
        .remove         = __devexit_p(max8925_backlight_remove),
 };
 
-static int __init max8925_backlight_init(void)
-{
-       return platform_driver_register(&max8925_backlight_driver);
-}
-module_init(max8925_backlight_init);
-
-static void __exit max8925_backlight_exit(void)
-{
-       platform_driver_unregister(&max8925_backlight_driver);
-};
-module_exit(max8925_backlight_exit);
+module_platform_driver(max8925_backlight_driver);
 
 MODULE_DESCRIPTION("Backlight Driver for Maxim MAX8925");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
index 08d26a72394c8e9920cc23e115802c642902305d..d8cde277ec83a691c658b9e0afef6c3aef63ffb4 100644 (file)
@@ -195,18 +195,7 @@ static struct platform_driver omapbl_driver = {
        },
 };
 
-static int __init omapbl_init(void)
-{
-       return platform_driver_register(&omapbl_driver);
-}
-
-static void __exit omapbl_exit(void)
-{
-       platform_driver_unregister(&omapbl_driver);
-}
-
-module_init(omapbl_init);
-module_exit(omapbl_exit);
+module_platform_driver(omapbl_driver);
 
 MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
 MODULE_DESCRIPTION("OMAP LCD Backlight driver");
index ef5628d60563841aceab7bca62191c0faf57f763..13e88b71daecc42b5e833c252fd1305a19cc5ed0 100644 (file)
@@ -173,17 +173,7 @@ static struct platform_driver pcf50633_bl_driver = {
        },
 };
 
-static int __init pcf50633_bl_init(void)
-{
-       return platform_driver_register(&pcf50633_bl_driver);
-}
-module_init(pcf50633_bl_init);
-
-static void __exit pcf50633_bl_exit(void)
-{
-       platform_driver_unregister(&pcf50633_bl_driver);
-}
-module_exit(pcf50633_bl_exit);
+module_platform_driver(pcf50633_bl_driver);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("PCF50633 backlight driver");
index 302330acf6284e0b22e7dc77a6ea055603aa5358..f0bf491ed087a57d86fade373ad326fb6c68300d 100644 (file)
@@ -85,7 +85,8 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
+       plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd),
+                           GFP_KERNEL);
        if (!plcd) {
                dev_err(dev, "no memory for state\n");
                return -ENOMEM;
@@ -98,7 +99,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
        if (IS_ERR(plcd->lcd)) {
                dev_err(dev, "cannot register lcd device\n");
                err = PTR_ERR(plcd->lcd);
-               goto err_mem;
+               goto err;
        }
 
        platform_set_drvdata(pdev, plcd);
@@ -106,8 +107,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
 
        return 0;
 
- err_mem:
-       kfree(plcd);
+ err:
        return err;
 }
 
@@ -116,7 +116,6 @@ static int __devexit platform_lcd_remove(struct platform_device *pdev)
        struct platform_lcd *plcd = platform_get_drvdata(pdev);
 
        lcd_device_unregister(plcd->lcd);
-       kfree(plcd);
 
        return 0;
 }
@@ -157,18 +156,7 @@ static struct platform_driver platform_lcd_driver = {
        .resume         = platform_lcd_resume,
 };
 
-static int __init platform_lcd_init(void)
-{
-       return platform_driver_register(&platform_lcd_driver);
-}
-
-static void __exit platform_lcd_cleanup(void)
-{
-       platform_driver_unregister(&platform_lcd_driver);
-}
-
-module_init(platform_lcd_init);
-module_exit(platform_lcd_cleanup);
+module_platform_driver(platform_lcd_driver);
 
 MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
 MODULE_LICENSE("GPL v2");
index 8b5b2a4124c7980129be8146f881772f5c33fa97..7496d04e1d3c1fcda109ae04d12e79c24edacc0e 100644 (file)
@@ -169,10 +169,9 @@ static int pwm_backlight_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int pwm_backlight_suspend(struct platform_device *pdev,
-                                pm_message_t state)
+static int pwm_backlight_suspend(struct device *dev)
 {
-       struct backlight_device *bl = platform_get_drvdata(pdev);
+       struct backlight_device *bl = dev_get_drvdata(dev);
        struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
 
        if (pb->notify)
@@ -184,40 +183,32 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
        return 0;
 }
 
-static int pwm_backlight_resume(struct platform_device *pdev)
+static int pwm_backlight_resume(struct device *dev)
 {
-       struct backlight_device *bl = platform_get_drvdata(pdev);
+       struct backlight_device *bl = dev_get_drvdata(dev);
 
        backlight_update_status(bl);
        return 0;
 }
-#else
-#define pwm_backlight_suspend  NULL
-#define pwm_backlight_resume   NULL
+
+static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
+                        pwm_backlight_resume);
+
 #endif
 
 static struct platform_driver pwm_backlight_driver = {
        .driver         = {
                .name   = "pwm-backlight",
                .owner  = THIS_MODULE,
+#ifdef CONFIG_PM
+               .pm     = &pwm_backlight_pm_ops,
+#endif
        },
        .probe          = pwm_backlight_probe,
        .remove         = pwm_backlight_remove,
-       .suspend        = pwm_backlight_suspend,
-       .resume         = pwm_backlight_resume,
 };
 
-static int __init pwm_backlight_init(void)
-{
-       return platform_driver_register(&pwm_backlight_driver);
-}
-module_init(pwm_backlight_init);
-
-static void __exit pwm_backlight_exit(void)
-{
-       platform_driver_unregister(&pwm_backlight_driver);
-}
-module_exit(pwm_backlight_exit);
+module_platform_driver(pwm_backlight_driver);
 
 MODULE_DESCRIPTION("PWM based Backlight Driver");
 MODULE_LICENSE("GPL");
index fbe9e9316f3b51a09e5de299a9a544d878bf5b33..4e915f5eca99b43697e195207150a00bae631f22 100644 (file)
@@ -236,17 +236,7 @@ static struct platform_driver wm831x_backlight_driver = {
        .remove         = wm831x_backlight_remove,
 };
 
-static int __init wm831x_backlight_init(void)
-{
-       return platform_driver_register(&wm831x_backlight_driver);
-}
-module_init(wm831x_backlight_init);
-
-static void __exit wm831x_backlight_exit(void)
-{
-       platform_driver_unregister(&wm831x_backlight_driver);
-}
-module_exit(wm831x_backlight_exit);
+module_platform_driver(wm831x_backlight_driver);
 
 MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
index beac52fc1c0eea6c1cadf337983c3b2b5c78614d..cb4529c40d742b93ebe7479b3688e43d4e249233 100644 (file)
@@ -671,20 +671,17 @@ InitWait:
        }
 }
 
-static struct xenbus_device_id xenfb_ids[] = {
+static const struct xenbus_device_id xenfb_ids[] = {
        { "vfb" },
        { "" }
 };
 
-static struct xenbus_driver xenfb_driver = {
-       .name = "vfb",
-       .owner = THIS_MODULE,
-       .ids = xenfb_ids,
+static DEFINE_XENBUS_DRIVER(xenfb, ,
        .probe = xenfb_probe,
        .remove = xenfb_remove,
        .resume = xenfb_resume,
        .otherend_changed = xenfb_backend_changed,
-};
+);
 
 static int __init xenfb_init(void)
 {
index 79fd606b7cd5cad60f487120db2e3ebf408ddedb..877b107f77a769577111ce5f9eee41742be8fa32 100644 (file)
@@ -772,6 +772,19 @@ config SMSC37B787_WDT
 
          Most people will say N.
 
+config VIA_WDT
+       tristate "VIA Watchdog Timer"
+       depends on X86
+       select WATCHDOG_CORE
+       ---help---
+       This is the driver for the hardware watchdog timer on VIA
+       southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+
+       To compile this driver as a module, choose M here; the module
+       will be called via_wdt.
+
+       Most people will say N.
+
 config W83627HF_WDT
        tristate "W83627HF/W83627DHG Watchdog Timer"
        depends on X86
index fe893e91935b6652e680b3b722ab3bb7b71bf3b3..e8f479a16402c9cfa7904c61a8f9c2beebedcb37 100644 (file)
@@ -99,6 +99,7 @@ obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o
 obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
 obj-$(CONFIG_SMSC_SCH311X_WDT) += sch311x_wdt.o
 obj-$(CONFIG_SMSC37B787_WDT) += smsc37b787_wdt.o
+obj-$(CONFIG_VIA_WDT) += via_wdt.o
 obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
 obj-$(CONFIG_W83697HF_WDT) += w83697hf_wdt.o
 obj-$(CONFIG_W83697UG_WDT) += w83697ug_wdt.o
index b29221783598f7ab2cfc2030d702b6263089a719..502773ad5acda8889ec5c0770ac38702d6db16de 100644 (file)
@@ -70,8 +70,8 @@ struct ar7_wdt {
 };
 
 static unsigned long wdt_is_open;
-static spinlock_t wdt_lock;
 static unsigned expect_close;
+static DEFINE_SPINLOCK(wdt_lock);
 
 /* XXX currently fixed, allows max margin ~68.72 secs */
 #define prescale_value 0xffff
@@ -280,8 +280,6 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
 {
        int rc;
 
-       spin_lock_init(&wdt_lock);
-
        ar7_regs_wdt =
                platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!ar7_regs_wdt) {
@@ -355,15 +353,4 @@ static struct platform_driver ar7_wdt_driver = {
        },
 };
 
-static int __init ar7_wdt_init(void)
-{
-       return platform_driver_register(&ar7_wdt_driver);
-}
-
-static void __exit ar7_wdt_cleanup(void)
-{
-       platform_driver_unregister(&ar7_wdt_driver);
-}
-
-module_init(ar7_wdt_init);
-module_exit(ar7_wdt_cleanup);
+module_platform_driver(ar7_wdt_driver);
index 725c84bfdd76ee98d83f1497f1b6f11bcace61df..9db808349f8b1e6dc29a892920602fb497f5ef6d 100644 (file)
@@ -68,17 +68,23 @@ static int max_timeout;
 static inline void ath79_wdt_keepalive(void)
 {
        ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+       /* flush write */
+       ath79_reset_rr(AR71XX_RESET_REG_WDOG);
 }
 
 static inline void ath79_wdt_enable(void)
 {
        ath79_wdt_keepalive();
        ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+       /* flush write */
+       ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
 }
 
 static inline void ath79_wdt_disable(void)
 {
        ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+       /* flush write */
+       ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
 }
 
 static int ath79_wdt_set_timeout(int val)
index 5064e831752128a41ee810dbb854af8ef1cadb40..8dc7de641e2610f1042cd8a09f7170ed2195df34 100644 (file)
@@ -311,18 +311,7 @@ static struct platform_driver bcm63xx_wdt = {
        }
 };
 
-static int __init bcm63xx_wdt_init(void)
-{
-       return platform_driver_register(&bcm63xx_wdt);
-}
-
-static void __exit bcm63xx_wdt_exit(void)
-{
-       platform_driver_unregister(&bcm63xx_wdt);
-}
-
-module_init(bcm63xx_wdt_init);
-module_exit(bcm63xx_wdt_exit);
+module_platform_driver(bcm63xx_wdt);
 
 MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
index edd3475f41db7ef2494a9befb7256b0e88262c72..251c863d71ddc123481a1bb4743973c8ebde02b6 100644 (file)
@@ -39,7 +39,7 @@
 static int verbose;
 static int port = 0x91;
 static int ticks = 10000;
-static spinlock_t cpu5wdt_lock;
+static DEFINE_SPINLOCK(cpu5wdt_lock);
 
 #define PFX                    "cpu5wdt: "
 
@@ -223,7 +223,6 @@ static int __devinit cpu5wdt_init(void)
                                "port=0x%x, verbose=%i\n", port, verbose);
 
        init_completion(&cpu5wdt_device.stop);
-       spin_lock_init(&cpu5wdt_lock);
        cpu5wdt_device.queue = 0;
        setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
        cpu5wdt_device.default_ticks = ticks;
index 1e013e8457b7c1ee24c7d8731ba48c7ab4357583..1b793dfd868fdbe009c244e3cf0a5150bc431035 100644 (file)
@@ -687,15 +687,4 @@ static struct platform_driver cpwd_driver = {
        .remove         = __devexit_p(cpwd_remove),
 };
 
-static int __init cpwd_init(void)
-{
-       return platform_driver_register(&cpwd_driver);
-}
-
-static void __exit cpwd_exit(void)
-{
-       platform_driver_unregister(&cpwd_driver);
-}
-
-module_init(cpwd_init);
-module_exit(cpwd_exit);
+module_platform_driver(cpwd_driver);
index 51b5551b4e3fe219d7f934970898024216fab3c3..c8c5c8032bcb61f763517cb688b37de98a6deb68 100644 (file)
@@ -271,18 +271,7 @@ static struct platform_driver platform_wdt_driver = {
        .remove = __devexit_p(davinci_wdt_remove),
 };
 
-static int __init davinci_wdt_init(void)
-{
-       return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit davinci_wdt_exit(void)
-{
-       platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(davinci_wdt_init);
-module_exit(davinci_wdt_exit);
+module_platform_driver(platform_wdt_driver);
 
 MODULE_AUTHOR("Texas Instruments");
 MODULE_DESCRIPTION("DaVinci Watchdog Driver");
index f10f8c0abba45b0819c9be82805058f7d2d4b38d..1b0e3dd81c1a2751be3db157790f677d27eee5e9 100644 (file)
@@ -358,17 +358,7 @@ static struct platform_driver dw_wdt_driver = {
        },
 };
 
-static int __init dw_wdt_watchdog_init(void)
-{
-       return platform_driver_register(&dw_wdt_driver);
-}
-module_init(dw_wdt_watchdog_init);
-
-static void __exit dw_wdt_watchdog_exit(void)
-{
-       platform_driver_unregister(&dw_wdt_driver);
-}
-module_exit(dw_wdt_watchdog_exit);
+module_platform_driver(dw_wdt_driver);
 
 MODULE_AUTHOR("Jamie Iles");
 MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
index 41018d429abb14ff80b0309d2b85bd6f53b745af..3946c51099c07c6fc211658fd0df5aa51a8970bb 100644 (file)
@@ -64,7 +64,7 @@
 static unsigned long eurwdt_is_open;
 static int eurwdt_timeout;
 static char eur_expect_close;
-static spinlock_t eurwdt_lock;
+static DEFINE_SPINLOCK(eurwdt_lock);
 
 /*
  * You must set these - there is no sane way to probe for this board.
@@ -446,8 +446,6 @@ static int __init eurwdt_init(void)
                goto outreg;
        }
 
-       spin_lock_init(&eurwdt_lock);
-
        ret = misc_register(&eurwdt_miscdev);
        if (ret) {
                printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n",
index 195e0f798e769d4fda32ea7e164b3fd818c41d69..c7481ad51629d5b3a47088e0b37466673c2c0afc 100644 (file)
@@ -68,7 +68,7 @@ static char asr_expect_close;
 static unsigned int asr_type, asr_base, asr_length;
 static unsigned int asr_read_addr, asr_write_addr;
 static unsigned char asr_toggle_mask, asr_disable_mask;
-static spinlock_t asr_lock;
+static DEFINE_SPINLOCK(asr_lock);
 
 static void __asr_toggle(void)
 {
@@ -386,8 +386,6 @@ static int __init ibmasr_init(void)
        if (!asr_type)
                return -ENODEV;
 
-       spin_lock_init(&asr_lock);
-
        rc = asr_get_base_address();
        if (rc)
                return rc;
index 1cc5609666d10fe45f513c24393232f8180a3ec9..1475e09f9af214f5656e6d4ff9b1067555f5554f 100644 (file)
@@ -28,7 +28,7 @@
 
 #define PFX "indydog: "
 static unsigned long indydog_alive;
-static spinlock_t indydog_lock;
+static DEFINE_SPINLOCK(indydog_lock);
 
 #define WATCHDOG_TIMEOUT 30            /* 30 sec default timeout */
 
@@ -185,8 +185,6 @@ static int __init watchdog_init(void)
 {
        int ret;
 
-       spin_lock_init(&indydog_lock);
-
        ret = register_reboot_notifier(&indydog_notifier);
        if (ret) {
                printk(KERN_ERR PFX
index aef94789019f3ffb886fb5aadd9f15e8575c982c..82fa7a92a8d280b7f439489dd43ac70e97e5b86d 100644 (file)
@@ -37,7 +37,7 @@
 static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned long wdt_status;
 static unsigned long boot_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
 
 #define WDT_IN_USE             0
 #define WDT_OK_TO_CLOSE                1
@@ -226,9 +226,6 @@ static int __init iop_wdt_init(void)
 {
        int ret;
 
-       spin_lock_init(&wdt_lock);
-
-
        /* check if the reset was caused by the watchdog timer */
        boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
 
index e86952a7168c8b247ea5e9252c1977ba38bde94f..084f71aa855a7a7599359d081ca30478386998ef 100644 (file)
@@ -32,7 +32,7 @@
 static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int heartbeat = 60;    /* (secs) Default is 1 minute */
 static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
 
 #define        WDT_IN_USE              0
 #define        WDT_OK_TO_CLOSE         1
@@ -189,7 +189,6 @@ static int __init ixp2000_wdt_init(void)
                return -EIO;
        }
        wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
-       spin_lock_init(&wdt_lock);
        return misc_register(&ixp2000_wdt_miscdev);
 }
 
index e02c0ecda26b73a7688ddc3352e99016350b67ab..4fc2e9ac26f7c818768c0e6f1a05a78b789cd7e7 100644 (file)
@@ -181,7 +181,6 @@ static int __init ixp4xx_wdt_init(void)
 
                return -ENODEV;
        }
-       spin_lock_init(&wdt_lock);
        boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
                        WDIOF_CARDRESET : 0;
        ret = misc_register(&ixp4xx_wdt_miscdev);
index 684ba01fb54010a034c7522656f9b891fb22ba3b..17ef300bccc52ca6e8c78edd1f0cc391f2504147 100644 (file)
@@ -295,18 +295,7 @@ static struct platform_driver jz4740_wdt_driver = {
        },
 };
 
-
-static int __init jz4740_wdt_init(void)
-{
-       return platform_driver_register(&jz4740_wdt_driver);
-}
-module_init(jz4740_wdt_init);
-
-static void __exit jz4740_wdt_exit(void)
-{
-       platform_driver_unregister(&jz4740_wdt_driver);
-}
-module_exit(jz4740_wdt_exit);
+module_platform_driver(jz4740_wdt_driver);
 
 MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
 MODULE_DESCRIPTION("jz4740 Watchdog Driver");
index 811471903e8a7390f68d46b60998307be76b2be1..51757a520e8f07d96f8e247b960eda886fda8528 100644 (file)
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 
 
 static unsigned long ks8695wdt_busy;
-static spinlock_t ks8695_lock;
+static DEFINE_SPINLOCK(ks8695_lock);
 
 /* ......................................................................... */
 
@@ -288,7 +288,6 @@ static struct platform_driver ks8695wdt_driver = {
 
 static int __init ks8695_wdt_init(void)
 {
-       spin_lock_init(&ks8695_lock);
        /* Check that the heartbeat value is within range;
           if not reset to the default */
        if (ks8695_wdt_settimeout(wdt_time)) {
index 102aed0efbf158a02271cb06dc25b1388a899736..d3a63be2e28d07974249b082749781dbc46df2f1 100644 (file)
@@ -222,9 +222,6 @@ ltq_wdt_remove(struct platform_device *pdev)
 {
        misc_deregister(&ltq_wdt_miscdev);
 
-       if (ltq_wdt_membase)
-               iounmap(ltq_wdt_membase);
-
        return 0;
 }
 
index 73ba2fd8e591435c485bfa2574d2d8b2d70bf274..af63ecfbfa6f4c132082f856d7fd80d0d7d61ae5 100644 (file)
@@ -364,18 +364,7 @@ static struct platform_driver max63xx_wdt_driver = {
        },
 };
 
-static int __init max63xx_wdt_init(void)
-{
-       return platform_driver_register(&max63xx_wdt_driver);
-}
-
-static void __exit max63xx_wdt_exit(void)
-{
-       platform_driver_unregister(&max63xx_wdt_driver);
-}
-
-module_init(max63xx_wdt_init);
-module_exit(max63xx_wdt_exit);
+module_platform_driver(max63xx_wdt_driver);
 
 MODULE_AUTHOR("Marc Zyngier <maz@misterjones.org>");
 MODULE_DESCRIPTION("max63xx Watchdog Driver");
index ac37bb82392cce7462918d3e36b7ab7e74aeec13..c29e31d99fe846ae63479c3ae22f9bffa705056d 100644 (file)
@@ -253,18 +253,7 @@ static struct platform_driver mtx1_wdt_driver = {
        .driver.owner = THIS_MODULE,
 };
 
-static int __init mtx1_wdt_init(void)
-{
-       return platform_driver_register(&mtx1_wdt_driver);
-}
-
-static void __exit mtx1_wdt_exit(void)
-{
-       platform_driver_unregister(&mtx1_wdt_driver);
-}
-
-module_init(mtx1_wdt_init);
-module_exit(mtx1_wdt_exit);
+module_platform_driver(mtx1_wdt_driver);
 
 MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
 MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
index 6cee33d4b1612cba6ff4ea1b96cfae5691689c36..50359bad91770d41d8b3dedbb959cc0dac1d4ad0 100644 (file)
@@ -334,18 +334,7 @@ static struct platform_driver nuc900wdt_driver = {
        },
 };
 
-static int __init nuc900_wdt_init(void)
-{
-       return platform_driver_register(&nuc900wdt_driver);
-}
-
-static void __exit nuc900_wdt_exit(void)
-{
-       platform_driver_unregister(&nuc900wdt_driver);
-}
-
-module_init(nuc900_wdt_init);
-module_exit(nuc900_wdt_exit);
+module_platform_driver(nuc900wdt_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("Watchdog driver for NUC900");
index 4ec741ac952c292c4578b5148d3e840c2d9c73a1..f359ab85c3bc465284c6d3137d60b5e0ef5abad8 100644 (file)
@@ -414,18 +414,7 @@ static struct platform_driver xwdt_driver = {
        },
 };
 
-static int __init xwdt_init(void)
-{
-       return platform_driver_register(&xwdt_driver);
-}
-
-static void __exit xwdt_exit(void)
-{
-       platform_driver_unregister(&xwdt_driver);
-}
-
-module_init(xwdt_init);
-module_exit(xwdt_exit);
+module_platform_driver(xwdt_driver);
 
 MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
 MODULE_DESCRIPTION("Xilinx Watchdog driver");
index 2b4acb86c191eec0c4a23f0541eb0c8d33079445..4b33e3fd726bb4b2664dbc3c8ff66d0cef71be3d 100644 (file)
@@ -55,7 +55,7 @@ module_param(timer_margin, uint, 0);
 MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
 
 static unsigned int wdt_trgr_pattern = 0x1234;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
 
 struct omap_wdt_dev {
        void __iomem    *base;          /* physical */
@@ -232,6 +232,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
                if (cpu_is_omap24xx())
                        return put_user(omap_prcm_get_reset_sources(),
                                        (int __user *)arg);
+               return put_user(0, (int __user *)arg);
        case WDIOC_KEEPALIVE:
                pm_runtime_get_sync(wdev->dev);
                spin_lock(&wdt_lock);
@@ -437,19 +438,7 @@ static struct platform_driver omap_wdt_driver = {
        },
 };
 
-static int __init omap_wdt_init(void)
-{
-       spin_lock_init(&wdt_lock);
-       return platform_driver_register(&omap_wdt_driver);
-}
-
-static void __exit omap_wdt_exit(void)
-{
-       platform_driver_unregister(&omap_wdt_driver);
-}
-
-module_init(omap_wdt_init);
-module_exit(omap_wdt_exit);
+module_platform_driver(omap_wdt_driver);
 
 MODULE_AUTHOR("George G. Davis");
 MODULE_LICENSE("GPL");
index 2d9fb96a9ee9c42020f71903a403da9b769041a6..4ad78f8685154ca1348002dbb4a41f0e0d873adc 100644 (file)
@@ -41,7 +41,7 @@ static int heartbeat = -1;            /* module parameter (seconds) */
 static unsigned int wdt_max_duration;  /* (seconds) */
 static unsigned int wdt_tclk;
 static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
 
 static void orion_wdt_ping(void)
 {
@@ -294,19 +294,7 @@ static struct platform_driver orion_wdt_driver = {
        },
 };
 
-static int __init orion_wdt_init(void)
-{
-       spin_lock_init(&wdt_lock);
-       return platform_driver_register(&orion_wdt_driver);
-}
-
-static void __exit orion_wdt_exit(void)
-{
-       platform_driver_unregister(&orion_wdt_driver);
-}
-
-module_init(orion_wdt_init);
-module_exit(orion_wdt_exit);
+module_platform_driver(orion_wdt_driver);
 
 MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
 MODULE_DESCRIPTION("Orion Processor Watchdog");
index 614933225560ca5c9b6f2f30bf76d81dec68235d..bd143c9dd3e6d0084c08c40d972082e4b9f9db29 100644 (file)
@@ -334,18 +334,7 @@ static struct platform_driver platform_wdt_driver = {
        .remove = __devexit_p(pnx4008_wdt_remove),
 };
 
-static int __init pnx4008_wdt_init(void)
-{
-       return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit pnx4008_wdt_exit(void)
-{
-       platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(pnx4008_wdt_init);
-module_exit(pnx4008_wdt_exit);
+module_platform_driver(platform_wdt_driver);
 
 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
 MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
index d4c29b5311a4d574dea0c4d67300392472c03b3f..bf7bc8aa1c016fee830d5a1cbb2c6d949fa948ff 100644 (file)
@@ -332,18 +332,7 @@ static struct platform_driver rc32434_wdt_driver = {
        }
 };
 
-static int __init rc32434_wdt_init(void)
-{
-       return platform_driver_register(&rc32434_wdt_driver);
-}
-
-static void __exit rc32434_wdt_exit(void)
-{
-       platform_driver_unregister(&rc32434_wdt_driver);
-}
-
-module_init(rc32434_wdt_init);
-module_exit(rc32434_wdt_exit);
+module_platform_driver(rc32434_wdt_driver);
 
 MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
                "Florian Fainelli <florian@openwrt.org>");
index 428f8a1583e8598ae69414f8171e21a3bbbf2ba8..042ccc56ae26f34dad5f6ef2dbe1327d47cc56bf 100644 (file)
@@ -293,18 +293,7 @@ static struct platform_driver rdc321x_wdt_driver = {
        },
 };
 
-static int __init rdc321x_wdt_init(void)
-{
-       return platform_driver_register(&rdc321x_wdt_driver);
-}
-
-static void __exit rdc321x_wdt_exit(void)
-{
-       platform_driver_unregister(&rdc321x_wdt_driver);
-}
-
-module_init(rdc321x_wdt_init);
-module_exit(rdc321x_wdt_exit);
+module_platform_driver(rdc321x_wdt_driver);
 
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
 MODULE_DESCRIPTION("RDC321x watchdog driver");
index 109b533896b70c0c5e81b3e983dbdb783d10821d..c7e17ceafa6aeca9b83f77edc41a4e372d068585 100644 (file)
@@ -247,15 +247,4 @@ static struct platform_driver riowd_driver = {
        .remove         = __devexit_p(riowd_remove),
 };
 
-static int __init riowd_init(void)
-{
-       return platform_driver_register(&riowd_driver);
-}
-
-static void __exit riowd_exit(void)
-{
-       platform_driver_unregister(&riowd_driver);
-}
-
-module_init(riowd_init);
-module_exit(riowd_exit);
+module_platform_driver(riowd_driver);
index a79e3840782ad3f286f0971b4a3cd7f6d5a1ff0b..4bc3744e14e4bf54f80a84585d3626d64635ba21 100644 (file)
@@ -378,6 +378,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
                                                        "cannot start\n");
        }
 
+       watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
        ret = watchdog_register_device(&s3c2410_wdd);
        if (ret) {
                dev_err(dev, "cannot register watchdog (%d)\n", ret);
index ac2346a452e571468a23db6905e975018d2169a4..4c2a4e8698f9922813f8d31b86bca583f9985239 100644 (file)
@@ -272,18 +272,7 @@ static struct platform_driver platform_wdt_driver = {
        .resume = stmp3xxx_wdt_resume,
 };
 
-static int __init stmp3xxx_wdt_init(void)
-{
-       return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit stmp3xxx_wdt_exit(void)
-{
-       return platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(stmp3xxx_wdt_init);
-module_exit(stmp3xxx_wdt_exit);
+module_platform_driver(platform_wdt_driver);
 
 MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
 MODULE_LICENSE("GPL");
index 5a90a4a871ddf7c06f8c73b3f515d9000b7b3702..1490293dc7dad9167f61043db7187074ff0ff021 100644 (file)
@@ -506,17 +506,7 @@ static struct platform_driver ts72xx_wdt_driver = {
        },
 };
 
-static __init int ts72xx_wdt_init(void)
-{
-       return platform_driver_register(&ts72xx_wdt_driver);
-}
-module_init(ts72xx_wdt_init);
-
-static __exit void ts72xx_wdt_exit(void)
-{
-       platform_driver_unregister(&ts72xx_wdt_driver);
-}
-module_exit(ts72xx_wdt_exit);
+module_platform_driver(ts72xx_wdt_driver);
 
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
 MODULE_DESCRIPTION("TS-72xx SBC Watchdog");
index b5045ca7e61ccd03c3fa631026f27fe4ddc72e4f..0764c6239b98c8bb7793aca5c4b4486d79d7dac0 100644 (file)
@@ -256,17 +256,7 @@ static struct platform_driver twl4030_wdt_driver = {
        },
 };
 
-static int __devinit twl4030_wdt_init(void)
-{
-       return platform_driver_register(&twl4030_wdt_driver);
-}
-module_init(twl4030_wdt_init);
-
-static void __devexit twl4030_wdt_exit(void)
-{
-       platform_driver_unregister(&twl4030_wdt_driver);
-}
-module_exit(twl4030_wdt_exit);
+module_platform_driver(twl4030_wdt_driver);
 
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
new file mode 100644 (file)
index 0000000..026b4bb
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * VIA Chipset Watchdog Driver
+ *
+ * Copyright (C) 2011 Sigfox
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Marc Vertes <marc.vertes@sigfox.com>
+ * Based on a preliminary version from Harald Welte <HaraldWelte@viatech.com>
+ * Timer code by Wim Van Sebroeck <wim@iguana.be>
+ *
+ * Caveat: PnP must be enabled in BIOS to allow full access to watchdog
+ * control registers. If not, the watchdog must be configured in BIOS manually.
+ */
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/watchdog.h>
+
+/* Configuration registers relative to the pci device */
+#define VIA_WDT_MMIO_BASE      0xe8    /* MMIO region base address */
+#define VIA_WDT_CONF           0xec    /* watchdog enable state */
+
+/* Relevant bits for the VIA_WDT_CONF register */
+#define VIA_WDT_CONF_ENABLE    0x01    /* 1: enable watchdog */
+#define VIA_WDT_CONF_MMIO      0x02    /* 1: enable watchdog MMIO */
+
+/*
+ * The MMIO region contains the watchdog control register and the
+ * hardware timer counter.
+ */
+#define VIA_WDT_MMIO_LEN       8       /* MMIO region length in bytes */
+#define VIA_WDT_CTL            0       /* MMIO addr+0: state/control reg. */
+#define VIA_WDT_COUNT          4       /* MMIO addr+4: timer counter reg. */
+
+/* Bits for the VIA_WDT_CTL register */
+#define VIA_WDT_RUNNING                0x01    /* 0: stop, 1: running */
+#define VIA_WDT_FIRED          0x02    /* 1: restarted by expired watchdog */
+#define VIA_WDT_PWROFF         0x04    /* 0: reset, 1: poweroff */
+#define VIA_WDT_DISABLED       0x08    /* 1: timer is disabled */
+#define VIA_WDT_TRIGGER                0x80    /* 1: start a new countdown */
+
+/* Hardware heartbeat in seconds */
+#define WDT_HW_HEARTBEAT 1
+
+/* Timer heartbeat (500ms) */
+#define WDT_HEARTBEAT  (HZ/2)  /* should be <= ((WDT_HW_HEARTBEAT*HZ)/2) */
+
+/* User space timeout in seconds */
+#define WDT_TIMEOUT_MAX        1023    /* approx. 17 min. */
+#define WDT_TIMEOUT    60
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, between 1 and 1023 "
+       "(default = " __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+       "(default = " __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static struct watchdog_device wdt_dev;
+static struct resource wdt_res;
+static void __iomem *wdt_mem;
+static unsigned int mmio;
+static void wdt_timer_tick(unsigned long data);
+static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0);
+                                       /* The timer that pings the watchdog */
+static unsigned long next_heartbeat;   /* the next_heartbeat for the timer */
+
+static inline void wdt_reset(void)
+{
+       unsigned int ctl = readl(wdt_mem);
+
+       writel(ctl | VIA_WDT_TRIGGER, wdt_mem);
+}
+
+/*
+ * Timer tick: the timer will make sure that the watchdog timer hardware
+ * is being reset in time. The conditions to do this are:
+ *  1) the watchdog timer has been started and /dev/watchdog is open
+ *     and there is still time left before userspace should send the
+ *     next heartbeat/ping. (note: the internal heartbeat is much smaller
+ *     than the external/userspace heartbeat).
+ *  2) the watchdog timer has been stopped by userspace.
+ */
+static void wdt_timer_tick(unsigned long data)
+{
+       if (time_before(jiffies, next_heartbeat) ||
+          (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+               wdt_reset();
+               mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+       } else
+               pr_crit("I will reboot your machine !\n");
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+       /* calculate when the next userspace timeout will be */
+       next_heartbeat = jiffies + timeout * HZ;
+       return 0;
+}
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+       unsigned int ctl = readl(wdt_mem);
+
+       writel(timeout, wdt_mem + VIA_WDT_COUNT);
+       writel(ctl | VIA_WDT_RUNNING | VIA_WDT_TRIGGER, wdt_mem);
+       wdt_ping(wdd);
+       mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+       return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+       unsigned int ctl = readl(wdt_mem);
+
+       writel(ctl & ~VIA_WDT_RUNNING, wdt_mem);
+       return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd,
+                          unsigned int new_timeout)
+{
+       if (new_timeout < 1 || new_timeout > WDT_TIMEOUT_MAX)
+               return -EINVAL;
+       writel(new_timeout, wdt_mem + VIA_WDT_COUNT);
+       timeout = new_timeout;
+       return 0;
+}
+
+static const struct watchdog_info wdt_info = {
+       .identity =     "VIA watchdog",
+       .options =      WDIOF_CARDRESET |
+                       WDIOF_SETTIMEOUT |
+                       WDIOF_MAGICCLOSE |
+                       WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        wdt_start,
+       .stop =         wdt_stop,
+       .ping =         wdt_ping,
+       .set_timeout =  wdt_set_timeout,
+};
+
+static struct watchdog_device wdt_dev = {
+       .info =         &wdt_info,
+       .ops =          &wdt_ops,
+};
+
+static int __devinit wdt_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *ent)
+{
+       unsigned char conf;
+       int ret = -ENODEV;
+
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev, "cannot enable PCI device\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Allocate a MMIO region which contains watchdog control register
+        * and counter, then configure the watchdog to use this region.
+        * This is possible only if PnP is properly enabled in BIOS.
+        * If not, the watchdog must be configured in BIOS manually.
+        */
+       if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
+                             0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
+               dev_err(&pdev->dev, "MMIO allocation failed\n");
+               goto err_out_disable_device;
+       }
+
+       pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
+       pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
+       conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
+       pci_write_config_byte(pdev, VIA_WDT_CONF, conf);
+
+       pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
+       if (mmio) {
+               dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
+       } else {
+               dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
+               goto err_out_resource;
+       }
+
+       if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
+               dev_err(&pdev->dev, "MMIO region busy\n");
+               goto err_out_resource;
+       }
+
+       wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
+       if (wdt_mem == NULL) {
+               dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
+               goto err_out_release;
+       }
+
+       wdt_dev.timeout = timeout;
+       watchdog_set_nowayout(&wdt_dev, nowayout);
+       if (readl(wdt_mem) & VIA_WDT_FIRED)
+               wdt_dev.bootstatus |= WDIOF_CARDRESET;
+
+       ret = watchdog_register_device(&wdt_dev);
+       if (ret)
+               goto err_out_iounmap;
+
+       /* start triggering, in case of watchdog already enabled by BIOS */
+       mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+       return 0;
+
+err_out_iounmap:
+       iounmap(wdt_mem);
+err_out_release:
+       release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+err_out_resource:
+       release_resource(&wdt_res);
+err_out_disable_device:
+       pci_disable_device(pdev);
+       return ret;
+}
+
+static void __devexit wdt_remove(struct pci_dev *pdev)
+{
+       watchdog_unregister_device(&wdt_dev);
+       del_timer(&timer);
+       iounmap(wdt_mem);
+       release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+       release_resource(&wdt_res);
+       pci_disable_device(pdev);
+}
+
+DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
+       { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
+       { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
+       { 0 }
+};
+
+static struct pci_driver wdt_driver = {
+       .name           = "via_wdt",
+       .id_table       = wdt_pci_table,
+       .probe          = wdt_probe,
+       .remove         = __devexit_p(wdt_remove),
+};
+
+static int __init wdt_init(void)
+{
+       if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
+               timeout = WDT_TIMEOUT;
+       return pci_register_driver(&wdt_driver);
+}
+
+static void __exit wdt_exit(void)
+{
+       pci_unregister_driver(&wdt_driver);
+}
+
+module_init(wdt_init);
+module_exit(wdt_exit);
+
+MODULE_AUTHOR("Marc Vertes");
+MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset");
+MODULE_LICENSE("GPL");
index e789a47db41f4dc1c618efaba3c8e189851c42bb..263c883f0806933c80212f00b8e3792c3ed73db6 100644 (file)
@@ -199,7 +199,8 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
        if (reg & WM831X_WDOG_DEBUG)
                dev_warn(wm831x->dev, "Watchdog is paused\n");
 
-       driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+       driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+                                  GFP_KERNEL);
        if (!driver_data) {
                dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
                ret = -ENOMEM;
@@ -213,11 +214,9 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
 
        wm831x_wdt->info = &wm831x_wdt_info;
        wm831x_wdt->ops = &wm831x_wdt_ops;
+       watchdog_set_nowayout(wm831x_wdt, nowayout);
        watchdog_set_drvdata(wm831x_wdt, driver_data);
 
-       if (nowayout)
-               wm831x_wdt->status |= WDOG_NO_WAY_OUT;
-
        reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
        reg &= WM831X_WDOG_TO_MASK;
        for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
@@ -252,7 +251,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
                                dev_err(wm831x->dev,
                                        "Failed to request update GPIO: %d\n",
                                        ret);
-                               goto err_alloc;
+                               goto err;
                        }
 
                        ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -294,8 +293,6 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
 err_gpio:
        if (driver_data->update_gpio)
                gpio_free(driver_data->update_gpio);
-err_alloc:
-       kfree(driver_data);
 err:
        return ret;
 }
@@ -320,17 +317,7 @@ static struct platform_driver wm831x_wdt_driver = {
        },
 };
 
-static int __init wm831x_wdt_init(void)
-{
-       return platform_driver_register(&wm831x_wdt_driver);
-}
-module_init(wm831x_wdt_init);
-
-static void __exit wm831x_wdt_exit(void)
-{
-       platform_driver_unregister(&wm831x_wdt_driver);
-}
-module_exit(wm831x_wdt_exit);
+module_platform_driver(wm831x_wdt_driver);
 
 MODULE_AUTHOR("Mark Brown");
 MODULE_DESCRIPTION("WM831x Watchdog");
index b68d928c8f90fcf259c141b22f4fb7fa84729f84..909c78650d3e8942e7b7c3bb70de90d7c208f9fc 100644 (file)
@@ -311,17 +311,7 @@ static struct platform_driver wm8350_wdt_driver = {
        },
 };
 
-static int __init wm8350_wdt_init(void)
-{
-       return platform_driver_register(&wm8350_wdt_driver);
-}
-module_init(wm8350_wdt_init);
-
-static void __exit wm8350_wdt_exit(void)
-{
-       platform_driver_unregister(&wm8350_wdt_driver);
-}
-module_exit(wm8350_wdt_exit);
+module_platform_driver(wm8350_wdt_driver);
 
 MODULE_AUTHOR("Mark Brown");
 MODULE_DESCRIPTION("WM8350 Watchdog");
index 8795480c2350301362640120ac02e09709cce3d4..a1ced521cf744600e93a4f9bb2a94663abaebdca 100644 (file)
@@ -86,6 +86,7 @@ config XEN_BACKEND
 
 config XENFS
        tristate "Xen filesystem"
+       select XEN_PRIVCMD
        default y
        help
          The xen filesystem provides a way for domains to share
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
          xen-pciback.hide=(03:00.0)(04:00.0)
 
          If in doubt, say m.
+
+config XEN_PRIVCMD
+       tristate
+       depends on XEN
+       default m
+
 endmenu
index 974fffdf22b2e15031faa6396721fa9800883b2b..aa31337192cc5421f2df8d1ebc24a34d75952449 100644 (file)
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM)                        += tmem.o
 obj-$(CONFIG_SWIOTLB_XEN)              += swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0)                 += pci.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += xen-pciback/
+obj-$(CONFIG_XEN_PRIVCMD)              += xen-privcmd.o
 
 xen-evtchn-y                           := evtchn.o
 xen-gntdev-y                           := gntdev.o
 xen-gntalloc-y                         := gntalloc.o
+xen-privcmd-y                          := privcmd.o
index 6e075cdd0c6bf56ff8daacf986d443de23c49998..e5e5812a1014cbf2c306ec802391980a7926754c 100644 (file)
@@ -87,6 +87,7 @@ enum xen_irq_type {
  */
 struct irq_info {
        struct list_head list;
+       int refcnt;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned short evtchn;  /* event channel */
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
                panic("Unable to allocate metadata for IRQ%d\n", irq);
 
        info->type = IRQT_UNBOUND;
+       info->refcnt = -1;
 
        irq_set_handler_data(irq, info);
 
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
 
        irq_set_handler_data(irq, NULL);
 
+       WARN_ON(info->refcnt > 0);
+
        kfree(info);
 
        /* Legacy IRQ descriptors are managed by the arch. */
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
-               goto out;       /* XXX need refcount? */
+               goto out;
        }
 
        irq = xen_allocate_irq_gsi(gsi);
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
 {
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
+       struct irq_info *info = irq_get_handler_data(irq);
 
        mutex_lock(&irq_mapping_update_lock);
 
+       if (info->refcnt > 0) {
+               info->refcnt--;
+               if (info->refcnt != 0)
+                       goto done;
+       }
+
        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
 
        xen_free_irq(irq);
 
+ done:
        mutex_unlock(&irq_mapping_update_lock);
 }
 
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
+int evtchn_make_refcounted(unsigned int evtchn)
+{
+       int irq = evtchn_to_irq[evtchn];
+       struct irq_info *info;
+
+       if (irq == -1)
+               return -ENOENT;
+
+       info = irq_get_handler_data(irq);
+
+       if (!info)
+               return -ENOENT;
+
+       WARN_ON(info->refcnt != -1);
+
+       info->refcnt = 1;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+int evtchn_get(unsigned int evtchn)
+{
+       int irq;
+       struct irq_info *info;
+       int err = -ENOENT;
+
+       if (evtchn >= NR_EVENT_CHANNELS)
+               return -EINVAL;
+
+       mutex_lock(&irq_mapping_update_lock);
+
+       irq = evtchn_to_irq[evtchn];
+       if (irq == -1)
+               goto done;
+
+       info = irq_get_handler_data(irq);
+
+       if (!info)
+               goto done;
+
+       err = -EINVAL;
+       if (info->refcnt <= 0)
+               goto done;
+
+       info->refcnt++;
+       err = 0;
+ done:
+       mutex_unlock(&irq_mapping_update_lock);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(evtchn_get);
+
+void evtchn_put(unsigned int evtchn)
+{
+       int irq = evtchn_to_irq[evtchn];
+       if (WARN_ON(irq == -1))
+               return;
+       unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(evtchn_put);
+
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 {
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
index dbc13e94b612b39759a01fb3605e956dbca807f6..b1f60a0c0bea3e00627293101fd3dd38bb1fd96e 100644 (file)
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
        rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
                                       u->name, (void *)(unsigned long)port);
        if (rc >= 0)
-               rc = 0;
+               rc = evtchn_make_refcounted(port);
 
        return rc;
 }
index e1c4c6e5b469c44449f68e9e841d264eb08dbc48..934985d14c2459f6893a8b1c4712b98207830d8a 100644 (file)
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
                "the gntalloc device");
 
 static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
 static int gref_size;
 
 struct notify_info {
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
        uint64_t index;
 };
 
+struct gntalloc_vma_private_data {
+       struct gntalloc_gref *gref;
+       int users;
+       int count;
+};
+
 static void __del_gref(struct gntalloc_gref *gref);
 
 static void do_cleanup(void)
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
        }
 
        /* Add to gref lists. */
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
        list_splice_tail(&queue_gref, &gref_list);
        list_splice_tail(&queue_file, &priv->list);
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
 
        return 0;
 
 undo:
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
        gref_size -= (op->count - i);
 
        list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +173,7 @@ undo:
         */
        if (unlikely(!list_empty(&queue_gref)))
                list_splice_tail(&queue_gref, &gref_list);
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
        return rc;
 }
 
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
                tmp[gref->notify.pgoff] = 0;
                kunmap(gref->page);
        }
-       if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+       if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(gref->notify.event);
+               evtchn_put(gref->notify.event);
+       }
 
        gref->notify.flags = 0;
 
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
 
                if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
                        return;
+
+               gnttab_free_grant_reference(gref->gref_id);
        }
 
        gref_size--;
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
 
        pr_debug("%s: priv %p\n", __func__, priv);
 
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
        while (!list_empty(&priv->list)) {
                gref = list_entry(priv->list.next,
                        struct gntalloc_gref, next_file);
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
                        __del_gref(gref);
        }
        kfree(priv);
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
 
        return 0;
 }
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
                goto out;
        }
 
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
        /* Clean up pages that were at zero (local) users but were still mapped
         * by remote domains. Since those pages count towards the limit that we
         * are about to enforce, removing them here is a good idea.
         */
        do_cleanup();
        if (gref_size + op.count > limit) {
-               spin_unlock(&gref_lock);
+               mutex_unlock(&gref_mutex);
                rc = -ENOSPC;
                goto out_free;
        }
        gref_size += op.count;
        op.index = priv->index;
        priv->index += op.count * PAGE_SIZE;
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
 
        rc = add_grefs(&op, gref_ids, priv);
        if (rc < 0)
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
                goto dealloc_grant_out;
        }
 
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
        gref = find_grefs(priv, op.index, op.count);
        if (gref) {
                /* Remove from the file list only, and decrease reference count.
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
 
        do_cleanup();
 
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
 dealloc_grant_out:
        return rc;
 }
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
        index = op.index & ~(PAGE_SIZE - 1);
        pgoff = op.index & (PAGE_SIZE - 1);
 
-       spin_lock(&gref_lock);
+       mutex_lock(&gref_mutex);
 
        gref = find_grefs(priv, index, 1);
        if (!gref) {
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
                goto unlock_out;
        }
 
+       /* We need to grab a reference to the event channel we are going to use
+        * to send the notify before releasing the reference we may already have
+        * (if someone has called this ioctl twice). This is required so that
+        * it is possible to change the clear_byte part of the notification
+        * without disturbing the event channel part, which may now be the last
+        * reference to that event channel.
+        */
+       if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+               if (evtchn_get(op.event_channel_port)) {
+                       rc = -EINVAL;
+                       goto unlock_out;
+               }
+       }
+
+       if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+               evtchn_put(gref->notify.event);
+
        gref->notify.flags = op.action;
        gref->notify.pgoff = pgoff;
        gref->notify.event = op.event_channel_port;
        rc = 0;
+
  unlock_out:
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
        return rc;
 }
 
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
 
 static void gntalloc_vma_open(struct vm_area_struct *vma)
 {
-       struct gntalloc_gref *gref = vma->vm_private_data;
-       if (!gref)
+       struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+
+       if (!priv)
                return;
 
-       spin_lock(&gref_lock);
-       gref->users++;
-       spin_unlock(&gref_lock);
+       mutex_lock(&gref_mutex);
+       priv->users++;
+       mutex_unlock(&gref_mutex);
 }
 
 static void gntalloc_vma_close(struct vm_area_struct *vma)
 {
-       struct gntalloc_gref *gref = vma->vm_private_data;
-       if (!gref)
+       struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+       struct gntalloc_gref *gref, *next;
+       int i;
+
+       if (!priv)
                return;
 
-       spin_lock(&gref_lock);
-       gref->users--;
-       if (gref->users == 0)
-               __del_gref(gref);
-       spin_unlock(&gref_lock);
+       mutex_lock(&gref_mutex);
+       priv->users--;
+       if (priv->users == 0) {
+               gref = priv->gref;
+               for (i = 0; i < priv->count; i++) {
+                       gref->users--;
+                       next = list_entry(gref->next_gref.next,
+                                         struct gntalloc_gref, next_gref);
+                       if (gref->users == 0)
+                               __del_gref(gref);
+                       gref = next;
+               }
+               kfree(priv);
+       }
+       mutex_unlock(&gref_mutex);
 }
 
 static struct vm_operations_struct gntalloc_vmops = {
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
 static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct gntalloc_file_private_data *priv = filp->private_data;
+       struct gntalloc_vma_private_data *vm_priv;
        struct gntalloc_gref *gref;
        int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        int rv, i;
 
-       pr_debug("%s: priv %p, page %lu+%d\n", __func__,
-                      priv, vma->vm_pgoff, count);
-
        if (!(vma->vm_flags & VM_SHARED)) {
                printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
                return -EINVAL;
        }
 
-       spin_lock(&gref_lock);
+       vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
+       if (!vm_priv)
+               return -ENOMEM;
+
+       mutex_lock(&gref_mutex);
+
+       pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
+                      priv, vm_priv, vma->vm_pgoff, count);
+
        gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
        if (gref == NULL) {
                rv = -ENOENT;
                pr_debug("%s: Could not find grant reference",
                                __func__);
+               kfree(vm_priv);
                goto out_unlock;
        }
 
-       vma->vm_private_data = gref;
+       vm_priv->gref = gref;
+       vm_priv->users = 1;
+       vm_priv->count = count;
+
+       vma->vm_private_data = vm_priv;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 
        vma->vm_ops = &gntalloc_vmops;
 
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
        rv = 0;
 
 out_unlock:
-       spin_unlock(&gref_lock);
+       mutex_unlock(&gref_mutex);
        return rv;
 }
 
index afca14d9042e6cd2ae03f238143ac8188aaf35fb..99d8151c824adbff0611a16f493dde6e6927de4c 100644 (file)
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
 
        atomic_sub(map->count, &pages_mapped);
 
-       if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+       if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
+               evtchn_put(map->notify.event);
+       }
 
        if (map->pages) {
                if (!use_ptemod)
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
                }
        }
 
-       err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
+       err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
+                               pages, true);
        if (err)
                return err;
 
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
        struct ioctl_gntdev_unmap_notify op;
        struct grant_map *map;
        int rc;
+       int out_flags;
+       unsigned int out_event;
 
        if (copy_from_user(&op, u, sizeof(op)))
                return -EFAULT;
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
        if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
                return -EINVAL;
 
+       /* We need to grab a reference to the event channel we are going to use
+        * to send the notify before releasing the reference we may already have
+        * (if someone has called this ioctl twice). This is required so that
+        * it is possible to change the clear_byte part of the notification
+        * without disturbing the event channel part, which may now be the last
+        * reference to that event channel.
+        */
+       if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+               if (evtchn_get(op.event_channel_port))
+                       return -EINVAL;
+       }
+
+       out_flags = op.action;
+       out_event = op.event_channel_port;
+
        spin_lock(&priv->lock);
 
        list_for_each_entry(map, &priv->maps, next) {
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
                goto unlock_out;
        }
 
+       out_flags = map->notify.flags;
+       out_event = map->notify.event;
+
        map->notify.flags = op.action;
        map->notify.addr = op.index - (map->index << PAGE_SHIFT);
        map->notify.event = op.event_channel_port;
+
        rc = 0;
+
  unlock_out:
        spin_unlock(&priv->lock);
+
+       /* Drop the reference to the event channel we did not save in the map */
+       if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+               evtchn_put(out_event);
+
        return rc;
 }
 
index bf1c094f4ebf12ea234b9a60fdaea118d6217d8e..1cd94daa71db8379443115dd052bc5705fc74432 100644 (file)
 #include <xen/page.h>
 #include <xen/grant_table.h>
 #include <xen/interface/memory.h>
+#include <xen/hvc-console.h>
 #include <asm/xen/hypercall.h>
 
 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
 
-
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 #define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+#define GREFS_PER_GRANT_FRAME \
+(grant_table_version == 1 ?                      \
+(PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \
+(PAGE_SIZE / sizeof(union grant_entry_v2)))
 
 static grant_ref_t **gnttab_list;
 static unsigned int nr_grant_frames;
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
 unsigned long xen_hvm_resume_frames;
 EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
 
-static struct grant_entry *shared;
+static union {
+       struct grant_entry_v1 *v1;
+       union grant_entry_v2 *v2;
+       void *addr;
+} gnttab_shared;
+
+/*This is a structure of function pointers for grant table*/
+struct gnttab_ops {
+       /*
+        * Mapping a list of frames for storing grant entries. Frames parameter
+        * is used to store grant table address when grant table being setup,
+        * nr_gframes is the number of frames to map grant table. Returning
+        * GNTST_okay means success and negative value means failure.
+        */
+       int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
+       /*
+        * Release a list of frames which are mapped in map_frames for grant
+        * entry status.
+        */
+       void (*unmap_frames)(void);
+       /*
+        * Introducing a valid entry into the grant table, granting the frame of
+        * this grant entry to domain for accessing or transfering. Ref
+        * parameter is reference of this introduced grant entry, domid is id of
+        * granted domain, frame is the page frame to be granted, and flags is
+        * status of the grant entry to be updated.
+        */
+       void (*update_entry)(grant_ref_t ref, domid_t domid,
+                            unsigned long frame, unsigned flags);
+       /*
+        * Stop granting a grant entry to domain for accessing. Ref parameter is
+        * reference of a grant entry whose grant access will be stopped,
+        * readonly is not in use in this function. If the grant entry is
+        * currently mapped for reading or writing, just return failure(==0)
+        * directly and don't tear down the grant access. Otherwise, stop grant
+        * access for this entry and return success(==1).
+        */
+       int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
+       /*
+        * Stop granting a grant entry to domain for transfer. Ref parameter is
+        * reference of a grant entry whose grant transfer will be stopped. If
+        * tranfer has not started, just reclaim the grant entry and return
+        * failure(==0). Otherwise, wait for the transfer to complete and then
+        * return the frame.
+        */
+       unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+       /*
+        * Query the status of a grant entry. Ref parameter is reference of
+        * queried grant entry, return value is the status of queried entry.
+        * Detailed status(writing/reading) can be gotten from the return value
+        * by bit operations.
+        */
+       int (*query_foreign_access)(grant_ref_t ref);
+       /*
+        * Grant a domain to access a range of bytes within the page referred by
+        * an available grant entry. Ref parameter is reference of a grant entry
+        * which will be sub-page accessed, domid is id of grantee domain, frame
+        * is frame address of subpage grant, flags is grant type and flag
+        * information, page_off is offset of the range of bytes, and length is
+        * length of bytes to be accessed.
+        */
+       void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
+                                    unsigned long frame, int flags,
+                                    unsigned page_off, unsigned length);
+       /*
+        * Redirect an available grant entry on domain A to another grant
+        * reference of domain B, then allow domain C to use grant reference
+        * of domain B transitively. Ref parameter is an available grant entry
+        * reference on domain A, domid is id of domain C which accesses grant
+        * entry transitively, flags is grant type and flag information,
+        * trans_domid is id of domain B whose grant entry is finally accessed
+        * transitively, trans_gref is grant entry transitive reference of
+        * domain B.
+        */
+       void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
+                                  domid_t trans_domid, grant_ref_t trans_gref);
+};
+
+static struct gnttab_ops *gnttab_interface;
+
+/*This reflects status of grant entries, so act as a global value*/
+static grant_status_t *grstatus;
+
+static int grant_table_version;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
 
 static int gnttab_expand(unsigned int req_entries);
 
 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 
 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 {
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
 }
 
-static void update_grant_entry(grant_ref_t ref, domid_t domid,
-                              unsigned long frame, unsigned flags)
+/*
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
+ * Introducing a valid entry into the grant table:
+ *  1. Write ent->domid.
+ *  2. Write ent->frame:
+ *      GTF_permit_access:   Frame to which access is permitted.
+ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ *                           frame, or zero if none.
+ *  3. Write memory barrier (WMB).
+ *  4. Write ent->flags, inc. valid type.
+ */
+static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
+                                  unsigned long frame, unsigned flags)
+{
+       gnttab_shared.v1[ref].domid = domid;
+       gnttab_shared.v1[ref].frame = frame;
+       wmb();
+       gnttab_shared.v1[ref].flags = flags;
+}
+
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+                                  unsigned long frame, unsigned flags)
 {
-       /*
-        * Introducing a valid entry into the grant table:
-        *  1. Write ent->domid.
-        *  2. Write ent->frame:
-        *      GTF_permit_access:   Frame to which access is permitted.
-        *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
-        *                           frame, or zero if none.
-        *  3. Write memory barrier (WMB).
-        *  4. Write ent->flags, inc. valid type.
-        */
-       shared[ref].frame = frame;
-       shared[ref].domid = domid;
+       gnttab_shared.v2[ref].hdr.domid = domid;
+       gnttab_shared.v2[ref].full_page.frame = frame;
        wmb();
-       shared[ref].flags = flags;
+       gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 }
 
 /*
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly)
 {
-       update_grant_entry(ref, domid, frame,
+       gnttab_interface->update_entry(ref, domid, frame,
                           GTF_permit_access | (readonly ? GTF_readonly : 0));
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 
-int gnttab_query_foreign_access(grant_ref_t ref)
+void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+                                   unsigned long frame, int flags,
+                                   unsigned page_off,
+                                   unsigned length)
+{
+       gnttab_shared.v2[ref].sub_page.frame = frame;
+       gnttab_shared.v2[ref].sub_page.page_off = page_off;
+       gnttab_shared.v2[ref].sub_page.length = length;
+       gnttab_shared.v2[ref].hdr.domid = domid;
+       wmb();
+       gnttab_shared.v2[ref].hdr.flags =
+                               GTF_permit_access | GTF_sub_page | flags;
+}
+
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+                                           unsigned long frame, int flags,
+                                           unsigned page_off,
+                                           unsigned length)
 {
-       u16 nflags;
+       if (flags & (GTF_accept_transfer | GTF_reading |
+                    GTF_writing | GTF_transitive))
+               return -EPERM;
 
-       nflags = shared[ref].flags;
+       if (gnttab_interface->update_subpage_entry == NULL)
+               return -ENOSYS;
 
-       return nflags & (GTF_reading|GTF_writing);
+       gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
+                                              page_off, length);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
+
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+                                       int flags, unsigned page_off,
+                                       unsigned length)
+{
+       int ref, rc;
+
+       ref = get_free_entries(1);
+       if (unlikely(ref < 0))
+               return -ENOSPC;
+
+       rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
+                                                    page_off, length);
+       if (rc < 0) {
+               put_free_entry(ref);
+               return rc;
+       }
+
+       return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
+
+bool gnttab_subpage_grants_available(void)
+{
+       return gnttab_interface->update_subpage_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
+
+void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+                                 int flags, domid_t trans_domid,
+                                 grant_ref_t trans_gref)
+{
+       gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
+       gnttab_shared.v2[ref].transitive.gref = trans_gref;
+       gnttab_shared.v2[ref].hdr.domid = domid;
+       wmb();
+       gnttab_shared.v2[ref].hdr.flags =
+                               GTF_permit_access | GTF_transitive | flags;
+}
+
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+                                         int flags, domid_t trans_domid,
+                                         grant_ref_t trans_gref)
+{
+       if (flags & (GTF_accept_transfer | GTF_reading |
+                    GTF_writing | GTF_sub_page))
+               return -EPERM;
+
+       if (gnttab_interface->update_trans_entry == NULL)
+               return -ENOSYS;
+
+       gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
+                                            trans_gref);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
+
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+                                     domid_t trans_domid,
+                                     grant_ref_t trans_gref)
+{
+       int ref, rc;
+
+       ref = get_free_entries(1);
+       if (unlikely(ref < 0))
+               return -ENOSPC;
+
+       rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
+                                                  trans_domid, trans_gref);
+       if (rc < 0) {
+               put_free_entry(ref);
+               return rc;
+       }
+
+       return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
+
+bool gnttab_trans_grants_available(void)
+{
+       return gnttab_interface->update_trans_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
+
+static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+{
+       return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+}
+
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+       return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+       return gnttab_interface->query_foreign_access(ref);
 }
 EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 {
        u16 flags, nflags;
+       u16 *pflags;
 
-       nflags = shared[ref].flags;
+       pflags = &gnttab_shared.v1[ref].flags;
+       nflags = *pflags;
        do {
                flags = nflags;
                if (flags & (GTF_reading|GTF_writing)) {
                        printk(KERN_ALERT "WARNING: g.e. still in use!\n");
                        return 0;
                }
-       } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+       } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
+
+       return 1;
+}
+
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+       gnttab_shared.v2[ref].hdr.flags = 0;
+       mb();
+       if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+               return 0;
+       } else {
+               /* The read of grstatus needs to have acquire
+               semantics.  On x86, reads already have
+               that, and we just need to protect against
+               compiler reorderings.  On other
+               architectures we may need a full
+               barrier. */
+#ifdef CONFIG_X86
+               barrier();
+#else
+               mb();
+#endif
+       }
 
        return 1;
 }
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+       return gnttab_interface->end_foreign_access_ref(ref, readonly);
+}
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
                                       unsigned long pfn)
 {
-       update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+       gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 {
        unsigned long frame;
        u16           flags;
+       u16          *pflags;
+
+       pflags = &gnttab_shared.v1[ref].flags;
 
        /*
         * If a transfer is not even yet started, try to reclaim the grant
         * reference and return failure (== 0).
         */
-       while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-               if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+       while (!((flags = *pflags) & GTF_transfer_committed)) {
+               if (sync_cmpxchg(pflags, flags, 0) == flags)
                        return 0;
                cpu_relax();
        }
 
        /* If a transfer is in progress then wait until it is completed. */
        while (!(flags & GTF_transfer_completed)) {
-               flags = shared[ref].flags;
+               flags = *pflags;
                cpu_relax();
        }
 
        rmb();  /* Read the frame number /after/ reading completion status. */
-       frame = shared[ref].frame;
+       frame = gnttab_shared.v1[ref].frame;
+       BUG_ON(frame == 0);
+
+       return frame;
+}
+
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+       unsigned long frame;
+       u16           flags;
+       u16          *pflags;
+
+       pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+       /*
+        * If a transfer is not even yet started, try to reclaim the grant
+        * reference and return failure (== 0).
+        */
+       while (!((flags = *pflags) & GTF_transfer_committed)) {
+               if (sync_cmpxchg(pflags, flags, 0) == flags)
+                       return 0;
+               cpu_relax();
+       }
+
+       /* If a transfer is in progress then wait until it is completed. */
+       while (!(flags & GTF_transfer_completed)) {
+               flags = *pflags;
+               cpu_relax();
+       }
+
+       rmb();  /* Read the frame number /after/ reading completion status. */
+       frame = gnttab_shared.v2[ref].full_page.frame;
        BUG_ON(frame == 0);
 
        return frame;
 }
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+       return gnttab_interface->end_foreign_transfer_ref(ref);
+}
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 
 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-                       struct gnttab_map_grant_ref *kmap_ops,
-                       struct page **pages, unsigned int count)
+                   struct gnttab_map_grant_ref *kmap_ops,
+                   struct page **pages, unsigned int count)
 {
        int i, ret;
        pte_t *pte;
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                                (map_ops[i].host_addr & ~PAGE_MASK));
                        mfn = pte_mfn(*pte);
                } else {
-                       /* If you really wanted to do this:
-                        * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
-                        *
-                        * The reason we do not implement it is b/c on the
-                        * unmap path (gnttab_unmap_refs) we have no means of
-                        * checking whether the page is !GNTMAP_contains_pte.
-                        *
-                        * That is without some extra data-structure to carry
-                        * the struct page, bool clear_pte, and list_head next
-                        * tuples and deal with allocation/delallocation, etc.
-                        *
-                        * The users of this API set the GNTMAP_contains_pte
-                        * flag so lets just return not supported until it
-                        * becomes neccessary to implement.
-                        */
-                       return -EOPNOTSUPP;
+                       mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
                }
-               ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+               ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+                                      &kmap_ops[i] : NULL);
                if (ret)
                        return ret;
        }
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-               struct page **pages, unsigned int count)
+                     struct page **pages, unsigned int count, bool clear_pte)
 {
        int i, ret;
 
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                return ret;
 
        for (i = 0; i < count; i++) {
-               ret = m2p_remove_override(pages[i], true /* clear the PTE */);
+               ret = m2p_remove_override(pages[i], clear_pte);
                if (ret)
                        return ret;
        }
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+static unsigned nr_status_frames(unsigned nr_grant_frames)
+{
+       return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+}
+
+static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+{
+       int rc;
+
+       rc = arch_gnttab_map_shared(frames, nr_gframes,
+                                   gnttab_max_grant_frames(),
+                                   &gnttab_shared.addr);
+       BUG_ON(rc);
+
+       return 0;
+}
+
+static void gnttab_unmap_frames_v1(void)
+{
+       arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+}
+
+static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
+{
+       uint64_t *sframes;
+       unsigned int nr_sframes;
+       struct gnttab_get_status_frames getframes;
+       int rc;
+
+       nr_sframes = nr_status_frames(nr_gframes);
+
+       /* No need for kzalloc as it is initialized in the following
+        * hypercall GNTTABOP_get_status_frames.
+        */
+       sframes = kmalloc(nr_sframes  * sizeof(uint64_t), GFP_ATOMIC);
+       if (!sframes)
+               return -ENOMEM;
+
+       getframes.dom        = DOMID_SELF;
+       getframes.nr_frames  = nr_sframes;
+       set_xen_guest_handle(getframes.frame_list, sframes);
+
+       rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+                                      &getframes, 1);
+       if (rc == -ENOSYS) {
+               kfree(sframes);
+               return -ENOSYS;
+       }
+
+       BUG_ON(rc || getframes.status);
+
+       rc = arch_gnttab_map_status(sframes, nr_sframes,
+                                   nr_status_frames(gnttab_max_grant_frames()),
+                                   &grstatus);
+       BUG_ON(rc);
+       kfree(sframes);
+
+       rc = arch_gnttab_map_shared(frames, nr_gframes,
+                                   gnttab_max_grant_frames(),
+                                   &gnttab_shared.addr);
+       BUG_ON(rc);
+
+       return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+       arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+       arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
        struct gnttab_setup_table setup;
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
                return rc;
        }
 
+       /* No need for kzalloc as it is initialized in the following
+        * hypercall GNTTABOP_setup_table.
+        */
        frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
        if (!frames)
                return -ENOMEM;
@@ -567,19 +916,65 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
        BUG_ON(rc || setup.status);
 
-       rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
-                                   &shared);
-       BUG_ON(rc);
+       rc = gnttab_interface->map_frames(frames, nr_gframes);
 
        kfree(frames);
 
-       return 0;
+       return rc;
+}
+
+static struct gnttab_ops gnttab_v1_ops = {
+       .map_frames                     = gnttab_map_frames_v1,
+       .unmap_frames                   = gnttab_unmap_frames_v1,
+       .update_entry                   = gnttab_update_entry_v1,
+       .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v1,
+       .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v1,
+       .query_foreign_access           = gnttab_query_foreign_access_v1,
+};
+
+static struct gnttab_ops gnttab_v2_ops = {
+       .map_frames                     = gnttab_map_frames_v2,
+       .unmap_frames                   = gnttab_unmap_frames_v2,
+       .update_entry                   = gnttab_update_entry_v2,
+       .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v2,
+       .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v2,
+       .query_foreign_access           = gnttab_query_foreign_access_v2,
+       .update_subpage_entry           = gnttab_update_subpage_entry_v2,
+       .update_trans_entry             = gnttab_update_trans_entry_v2,
+};
+
+static void gnttab_request_version(void)
+{
+       int rc;
+       struct gnttab_set_version gsv;
+
+       gsv.version = 2;
+       rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+       if (rc == 0) {
+               grant_table_version = 2;
+               gnttab_interface = &gnttab_v2_ops;
+       } else if (grant_table_version == 2) {
+               /*
+                * If we've already used version 2 features,
+                * but then suddenly discover that they're not
+                * available (e.g. migrating to an older
+                * version of Xen), almost unbounded badness
+                * can happen.
+                */
+               panic("we need grant tables version 2, but only version 1 is available");
+       } else {
+               grant_table_version = 1;
+               gnttab_interface = &gnttab_v1_ops;
+       }
+       printk(KERN_INFO "Grant tables using version %d layout.\n",
+               grant_table_version);
 }
 
 int gnttab_resume(void)
 {
        unsigned int max_nr_gframes;
 
+       gnttab_request_version();
        max_nr_gframes = gnttab_max_grant_frames();
        if (max_nr_gframes < nr_grant_frames)
                return -ENOSYS;
@@ -587,9 +982,10 @@ int gnttab_resume(void)
        if (xen_pv_domain())
                return gnttab_map(0, nr_grant_frames - 1);
 
-       if (!shared) {
-               shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
-               if (shared == NULL) {
+       if (gnttab_shared.addr == NULL) {
+               gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+                                               PAGE_SIZE * max_nr_gframes);
+               if (gnttab_shared.addr == NULL) {
                        printk(KERN_WARNING
                                        "Failed to ioremap gnttab share frames!");
                        return -ENOMEM;
@@ -603,7 +999,7 @@ int gnttab_resume(void)
 
 int gnttab_suspend(void)
 {
-       arch_gnttab_unmap_shared(shared, nr_grant_frames);
+       gnttab_interface->unmap_frames();
        return 0;
 }
 
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
new file mode 100644 (file)
index 0000000..ccee0f1
--- /dev/null
@@ -0,0 +1,435 @@
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/swap.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/privcmd.h>
+#include <xen/interface/xen.h>
+#include <xen/features.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
+#endif
+
+static long privcmd_ioctl_hypercall(void __user *udata)
+{
+       struct privcmd_hypercall hypercall;
+       long ret;
+
+       if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
+               return -EFAULT;
+
+       ret = privcmd_call(hypercall.op,
+                          hypercall.arg[0], hypercall.arg[1],
+                          hypercall.arg[2], hypercall.arg[3],
+                          hypercall.arg[4]);
+
+       return ret;
+}
+
+static void free_page_list(struct list_head *pages)
+{
+       struct page *p, *n;
+
+       list_for_each_entry_safe(p, n, pages, lru)
+               __free_page(p);
+
+       INIT_LIST_HEAD(pages);
+}
+
+/*
+ * Given an array of items in userspace, return a list of pages
+ * containing the data.  If copying fails, either because of memory
+ * allocation failure or a problem reading user memory, return an
+ * error code; it's up to the caller to dispose of any partial list.
+ */
+static int gather_array(struct list_head *pagelist,
+                       unsigned nelem, size_t size,
+                       void __user *data)
+{
+       unsigned pageidx;
+       void *pagedata;
+       int ret;
+
+       if (size > PAGE_SIZE)
+               return 0;
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* quiet, gcc */
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page = alloc_page(GFP_KERNEL);
+
+                       ret = -ENOMEM;
+                       if (page == NULL)
+                               goto fail;
+
+                       pagedata = page_address(page);
+
+                       list_add_tail(&page->lru, pagelist);
+                       pageidx = 0;
+               }
+
+               ret = -EFAULT;
+               if (copy_from_user(pagedata + pageidx, data, size))
+                       goto fail;
+
+               data += size;
+               pageidx += size;
+       }
+
+       ret = 0;
+
+fail:
+       return ret;
+}
+
+/*
+ * Call function "fn" on each element of the array fragmented
+ * over a list of pages.
+ */
+static int traverse_pages(unsigned nelem, size_t size,
+                         struct list_head *pos,
+                         int (*fn)(void *data, void *state),
+                         void *state)
+{
+       void *pagedata;
+       unsigned pageidx;
+       int ret = 0;
+
+       BUG_ON(size > PAGE_SIZE);
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* hush, gcc */
+
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page;
+                       pos = pos->next;
+                       page = list_entry(pos, struct page, lru);
+                       pagedata = page_address(page);
+                       pageidx = 0;
+               }
+
+               ret = (*fn)(pagedata + pageidx, state);
+               if (ret)
+                       break;
+               pageidx += size;
+       }
+
+       return ret;
+}
+
+struct mmap_mfn_state {
+       unsigned long va;
+       struct vm_area_struct *vma;
+       domid_t domain;
+};
+
+static int mmap_mfn_range(void *data, void *state)
+{
+       struct privcmd_mmap_entry *msg = data;
+       struct mmap_mfn_state *st = state;
+       struct vm_area_struct *vma = st->vma;
+       int rc;
+
+       /* Do not allow range to wrap the address space. */
+       if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
+           ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
+               return -EINVAL;
+
+       /* Range chunks must be contiguous in va space. */
+       if ((msg->va != st->va) ||
+           ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
+               return -EINVAL;
+
+       rc = xen_remap_domain_mfn_range(vma,
+                                       msg->va & PAGE_MASK,
+                                       msg->mfn, msg->npages,
+                                       vma->vm_page_prot,
+                                       st->domain);
+       if (rc < 0)
+               return rc;
+
+       st->va += msg->npages << PAGE_SHIFT;
+
+       return 0;
+}
+
+static long privcmd_ioctl_mmap(void __user *udata)
+{
+       struct privcmd_mmap mmapcmd;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int rc;
+       LIST_HEAD(pagelist);
+       struct mmap_mfn_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
+               return -EFAULT;
+
+       rc = gather_array(&pagelist,
+                         mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                         mmapcmd.entry);
+
+       if (rc || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       {
+               struct page *page = list_first_entry(&pagelist,
+                                                    struct page, lru);
+               struct privcmd_mmap_entry *msg = page_address(page);
+
+               vma = find_vma(mm, msg->va);
+               rc = -EINVAL;
+
+               if (!vma || (msg->va != vma->vm_start) ||
+                   !privcmd_enforce_singleshot_mapping(vma))
+                       goto out_up;
+       }
+
+       state.va = vma->vm_start;
+       state.vma = vma;
+       state.domain = mmapcmd.dom;
+
+       rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                           &pagelist,
+                           mmap_mfn_range, &state);
+
+
+out_up:
+       up_write(&mm->mmap_sem);
+
+out:
+       free_page_list(&pagelist);
+
+       return rc;
+}
+
+struct mmap_batch_state {
+       domid_t domain;
+       unsigned long va;
+       struct vm_area_struct *vma;
+       int err;
+
+       xen_pfn_t __user *user;
+};
+
+static int mmap_batch_fn(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+                                      st->vma->vm_page_prot, st->domain) < 0) {
+               *mfnp |= 0xf0000000U;
+               st->err++;
+       }
+       st->va += PAGE_SIZE;
+
+       return 0;
+}
+
+static int mmap_return_errors(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       return put_user(*mfnp, st->user++);
+}
+
+static struct vm_operations_struct privcmd_vm_ops;
+
+static long privcmd_ioctl_mmap_batch(void __user *udata)
+{
+       int ret;
+       struct privcmd_mmapbatch m;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long nr_pages;
+       LIST_HEAD(pagelist);
+       struct mmap_batch_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&m, udata, sizeof(m)))
+               return -EFAULT;
+
+       nr_pages = m.num;
+       if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+               return -EINVAL;
+
+       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
+                          m.arr);
+
+       if (ret || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       vma = find_vma(mm, m.addr);
+       ret = -EINVAL;
+       if (!vma ||
+           vma->vm_ops != &privcmd_vm_ops ||
+           (m.addr != vma->vm_start) ||
+           ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
+           !privcmd_enforce_singleshot_mapping(vma)) {
+               up_write(&mm->mmap_sem);
+               goto out;
+       }
+
+       state.domain = m.dom;
+       state.vma = vma;
+       state.va = m.addr;
+       state.err = 0;
+
+       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                            &pagelist, mmap_batch_fn, &state);
+
+       up_write(&mm->mmap_sem);
+
+       if (state.err > 0) {
+               state.user = m.arr;
+               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                              &pagelist,
+                              mmap_return_errors, &state);
+       }
+
+out:
+       free_page_list(&pagelist);
+
+       return ret;
+}
+
+static long privcmd_ioctl(struct file *file,
+                         unsigned int cmd, unsigned long data)
+{
+       int ret = -ENOSYS;
+       void __user *udata = (void __user *) data;
+
+       switch (cmd) {
+       case IOCTL_PRIVCMD_HYPERCALL:
+               ret = privcmd_ioctl_hypercall(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAP:
+               ret = privcmd_ioctl_mmap(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAPBATCH:
+               ret = privcmd_ioctl_mmap_batch(udata);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+              vma, vma->vm_start, vma->vm_end,
+              vmf->pgoff, vmf->virtual_address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct privcmd_vm_ops = {
+       .fault = privcmd_fault
+};
+
+static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       /* Unsupported for auto-translate guests. */
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -ENOSYS;
+
+       /* DONTCOPY is essential for Xen because copy_page_range doesn't know
+        * how to recreate these mappings */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+       vma->vm_ops = &privcmd_vm_ops;
+       vma->vm_private_data = NULL;
+
+       return 0;
+}
+
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+{
+       return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+}
+
+const struct file_operations xen_privcmd_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = privcmd_ioctl,
+       .mmap = privcmd_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmd_fops);
+
+static struct miscdevice privcmd_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/privcmd",
+       .fops = &xen_privcmd_fops,
+};
+
+static int __init privcmd_init(void)
+{
+       int err;
+
+       if (!xen_domain())
+               return -ENODEV;
+
+       err = misc_register(&privcmd_dev);
+       if (err != 0) {
+               printk(KERN_ERR "Could not register Xen privcmd device\n");
+               return err;
+       }
+       return 0;
+}
+
+static void __exit privcmd_exit(void)
+{
+       misc_deregister(&privcmd_dev);
+}
+
+module_init(privcmd_init);
+module_exit(privcmd_exit);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
new file mode 100644 (file)
index 0000000..14facae
--- /dev/null
@@ -0,0 +1,3 @@
+#include <linux/fs.h>
+
+extern const struct file_operations xen_privcmd_fops;
index 284798aaf8b1391fd8d4de7e43bff52e5721272d..19e6a2041371c0fe9a908c71ac5c83cdbad3c015 100644 (file)
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
        char *m = NULL;
        unsigned int repeat = 3;
 
-       nr_tbl = swioltb_nr_tbl();
+       nr_tbl = swiotlb_nr_tbl();
        if (nr_tbl)
                xen_io_tlb_nslabs = nr_tbl;
        else {
index 8f06e1ed028c9ab737ca61136754ae2c707d43e2..7944a17f5cbf5f073cefac62652a3f0ea53034b1 100644 (file)
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref)
        kfree(pci_get_drvdata(psdev->dev));
        pci_set_drvdata(psdev->dev, NULL);
 
+       psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
        pci_dev_put(psdev->dev);
 
        kfree(psdev);
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
        xen_pcibk_config_free_dyn_fields(found_psdev->dev);
        xen_pcibk_config_reset_dev(found_psdev->dev);
 
+       xen_unregister_device_domain_owner(found_psdev->dev);
+
        spin_lock_irqsave(&found_psdev->lock, flags);
        found_psdev->pdev = NULL;
        spin_unlock_irqrestore(&found_psdev->lock, flags);
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
        dev_dbg(&dev->dev, "reset device\n");
        xen_pcibk_reset_device(dev);
 
+       dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
        return 0;
 
 config_release:
index 075525945e36d406c15f2598e7c0b0e4f8e43385..8e1c44d8ab469d2e28d47a838cb003fafb79949c 100644 (file)
@@ -241,11 +241,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
                goto out;
 
        dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
-       dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
        if (xen_register_device_domain_owner(dev,
                                             pdev->xdev->otherend_id) != 0) {
-               dev_err(&dev->dev, "device has been assigned to another " \
-                       "domain! Over-writting the ownership, but beware.\n");
+               dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
+                       xen_find_device_domain_owner(dev));
                xen_unregister_device_domain_owner(dev);
                xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
        }
@@ -281,7 +280,6 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
        }
 
        dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
-       dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
        xen_unregister_device_domain_owner(dev);
 
        xen_pcibk_release_pci_dev(pdev, dev);
@@ -707,19 +705,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
        return 0;
 }
 
-static const struct xenbus_device_id xenpci_ids[] = {
+static const struct xenbus_device_id xen_pcibk_ids[] = {
        {"pci"},
        {""},
 };
 
-static struct xenbus_driver xenbus_xen_pcibk_driver = {
-       .name                   = DRV_NAME,
-       .owner                  = THIS_MODULE,
-       .ids                    = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
        .probe                  = xen_pcibk_xenbus_probe,
        .remove                 = xen_pcibk_xenbus_remove,
        .otherend_changed       = xen_pcibk_frontend_changed,
-};
+);
 
 const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
 
@@ -735,11 +730,11 @@ int __init xen_pcibk_xenbus_register(void)
        if (passthrough)
                xen_pcibk_backend = &xen_pcibk_passthrough_backend;
        pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
-       return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+       return xenbus_register_backend(&xen_pcibk_driver);
 }
 
 void __exit xen_pcibk_xenbus_unregister(void)
 {
        destroy_workqueue(xen_pcibk_wq);
-       xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+       xenbus_unregister_driver(&xen_pcibk_driver);
 }
index 8dca685358b4286f17b5f4d362288d83ee4ab3bb..31e2e9050c7a5b5f511bfe780c69ddb1590e358c 100644 (file)
@@ -1,4 +1,5 @@
 obj-y  += xenbus.o
+obj-y  += xenbus_dev_frontend.o
 
 xenbus-objs =
 xenbus-objs += xenbus_client.o
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o
 xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
 xenbus-objs += $(xenbus-be-objs-y)
 
+obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
 obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
index 1906125eab491bb384293c4424c27b8f0477e6f0..566d2adbd6ea4f349edf875566cfaeef66c48fa9 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <xen/xen.h>
+
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+       struct list_head next;
+       union {
+               struct vm_struct *area; /* PV */
+               struct page *page;     /* HVM */
+       };
+       grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+       int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+       int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
 
 const char *xenbus_strstate(enum xenbus_state state)
 {
@@ -435,20 +459,34 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+       return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+                                    int gnt_ref, void **vaddr)
 {
        struct gnttab_map_grant_ref op = {
                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                .ref   = gnt_ref,
                .dom   = dev->otherend_id,
        };
+       struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *pte;
 
        *vaddr = NULL;
 
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
        area = alloc_vm_area(PAGE_SIZE, &pte);
-       if (!area)
+       if (!area) {
+               kfree(node);
                return -ENOMEM;
+       }
 
        op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 
        if (op.status != GNTST_okay) {
                free_vm_area(area);
+               kfree(node);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
                return op.status;
        }
 
-       /* Stuff the handle in an unused field */
-       area->phys_addr = (unsigned long)op.handle;
+       node->handle = op.handle;
+       node->area = area;
+
+       spin_lock(&xenbus_valloc_lock);
+       list_add(&node->next, &xenbus_valloc_pages);
+       spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = area->addr;
        return 0;
 }
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+                                     int gnt_ref, void **vaddr)
+{
+       struct xenbus_map_node *node;
+       int err;
+       void *addr;
+
+       *vaddr = NULL;
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+       if (err)
+               goto out_err;
+
+       addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+       err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+       if (err)
+               goto out_err;
+
+       spin_lock(&xenbus_valloc_lock);
+       list_add(&node->next, &xenbus_valloc_pages);
+       spin_unlock(&xenbus_valloc_lock);
+
+       *vaddr = addr;
+       return 0;
+
+ out_err:
+       free_xenballooned_pages(1, &node->page);
+       kfree(node);
+       return err;
+}
 
 
 /**
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
                    grant_handle_t *handle, void *vaddr)
 {
-       struct gnttab_map_grant_ref op = {
-               .host_addr = (unsigned long)vaddr,
-               .flags     = GNTMAP_host_map,
-               .ref       = gnt_ref,
-               .dom       = dev->otherend_id,
-       };
+       struct gnttab_map_grant_ref op;
+
+       gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
+                         dev->otherend_id);
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                BUG();
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 {
-       struct vm_struct *area;
+       return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+       struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref op = {
                .host_addr = (unsigned long)vaddr,
        };
        unsigned int level;
 
-       /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
-        * method so that we don't have to muck with vmalloc internals here.
-        * We could force the user to hang on to their struct vm_struct from
-        * xenbus_map_ring_valloc, but these 6 lines considerably simplify
-        * this API.
-        */
-       read_lock(&vmlist_lock);
-       for (area = vmlist; area != NULL; area = area->next) {
-               if (area->addr == vaddr)
-                       break;
+       spin_lock(&xenbus_valloc_lock);
+       list_for_each_entry(node, &xenbus_valloc_pages, next) {
+               if (node->area->addr == vaddr) {
+                       list_del(&node->next);
+                       goto found;
+               }
        }
-       read_unlock(&vmlist_lock);
+       node = NULL;
+ found:
+       spin_unlock(&xenbus_valloc_lock);
 
-       if (!area) {
+       if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }
 
-       op.handle = (grant_handle_t)area->phys_addr;
+       op.handle = node->handle;
        op.host_addr = arbitrary_virt_to_machine(
                lookup_address((unsigned long)vaddr, &level)).maddr;
 
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
                BUG();
 
        if (op.status == GNTST_okay)
-               free_vm_area(area);
+               free_vm_area(node->area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
-                                (int16_t)area->phys_addr, op.status);
+                                node->handle, op.status);
 
+       kfree(node);
        return op.status;
 }
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+       int rv;
+       struct xenbus_map_node *node;
+       void *addr;
+
+       spin_lock(&xenbus_valloc_lock);
+       list_for_each_entry(node, &xenbus_valloc_pages, next) {
+               addr = pfn_to_kaddr(page_to_pfn(node->page));
+               if (addr == vaddr) {
+                       list_del(&node->next);
+                       goto found;
+               }
+       }
+       node = NULL;
+ found:
+       spin_unlock(&xenbus_valloc_lock);
+
+       if (!node) {
+               xenbus_dev_error(dev, -ENOENT,
+                                "can't find mapped virtual address %p", vaddr);
+               return GNTST_bad_virt_addr;
+       }
+
+       rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+       if (!rv)
+               free_xenballooned_pages(1, &node->page);
+       else
+               WARN(1, "Leaking %p\n", vaddr);
+
+       kfree(node);
+       return rv;
+}
 
 /**
  * xenbus_unmap_ring
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t handle, void *vaddr)
 {
-       struct gnttab_unmap_grant_ref op = {
-               .host_addr = (unsigned long)vaddr,
-               .handle    = handle,
-       };
+       struct gnttab_unmap_grant_ref op;
+
+       gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
        return result;
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+       .map = xenbus_map_ring_valloc_pv,
+       .unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+       .map = xenbus_map_ring_valloc_hvm,
+       .unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+       if (xen_pv_domain())
+               ring_ops = &ring_ops_pv;
+       else
+               ring_ops = &ring_ops_hvm;
+}
index c21db7513736a90c399874f3f94524253d25181a..6e42800fa499bc6a303efabd300e763129d9888e 100644 (file)
@@ -31,6 +31,8 @@
 #ifndef _XENBUS_COMMS_H
 #define _XENBUS_COMMS_H
 
+#include <linux/fs.h>
+
 int xs_init(void);
 int xb_init_comms(void);
 
@@ -43,4 +45,6 @@ int xs_input_avail(void);
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
 
+extern const struct file_operations xen_xenbus_fops;
+
 #endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
new file mode 100644 (file)
index 0000000..3d3be78
--- /dev/null
@@ -0,0 +1,90 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/xenbus_dev.h>
+
+#include "xenbus_comms.h"
+
+MODULE_LICENSE("GPL");
+
+static int xenbus_backend_open(struct inode *inode, struct file *filp)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return nonseekable_open(inode, filp);
+}
+
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       switch (cmd) {
+               case IOCTL_XENBUS_BACKEND_EVTCHN:
+                       if (xen_store_evtchn > 0)
+                               return xen_store_evtchn;
+                       return -ENODEV;
+
+               default:
+                       return -ENOTTY;
+       }
+}
+
+static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       size_t size = vma->vm_end - vma->vm_start;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+               return -EINVAL;
+
+       if (remap_pfn_range(vma, vma->vm_start,
+                           virt_to_pfn(xen_store_interface),
+                           size, vma->vm_page_prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+const struct file_operations xenbus_backend_fops = {
+       .open = xenbus_backend_open,
+       .mmap = xenbus_backend_mmap,
+       .unlocked_ioctl = xenbus_backend_ioctl,
+};
+
+static struct miscdevice xenbus_backend_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/xenbus_backend",
+       .fops = &xenbus_backend_fops,
+};
+
+static int __init xenbus_backend_init(void)
+{
+       int err;
+
+       if (!xen_initial_domain())
+               return -ENODEV;
+
+       err = misc_register(&xenbus_backend_dev);
+       if (err)
+               printk(KERN_ERR "Could not register xenbus backend device\n");
+       return err;
+}
+
+static void __exit xenbus_backend_exit(void)
+{
+       misc_deregister(&xenbus_backend_dev);
+}
+
+module_init(xenbus_backend_init);
+module_exit(xenbus_backend_exit);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
new file mode 100644 (file)
index 0000000..527dc2a
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * Driver giving user-space access to the kernel's xenbus connection
+ * to xenstore.
+ *
+ * Copyright (c) 2005, Christian Limpach
+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Changes:
+ * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
+ *                              and /proc/xen compatibility mount point.
+ *                              Turned xenfs into a loadable module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/uio.h>
+#include <linux/notifier.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+
+#include "xenbus_comms.h"
+
+#include <xen/xenbus.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * An element of a list of outstanding transactions, for which we're
+ * still waiting a reply.
+ */
+struct xenbus_transaction_holder {
+       struct list_head list;
+       struct xenbus_transaction handle;
+};
+
+/*
+ * A buffer of data on the queue.
+ */
+struct read_buffer {
+       struct list_head list;
+       unsigned int cons;
+       unsigned int len;
+       char msg[];
+};
+
+struct xenbus_file_priv {
+       /*
+        * msgbuffer_mutex is held while partial requests are built up
+        * and complete requests are acted on.  It therefore protects
+        * the "transactions" and "watches" lists, and the partial
+        * request length and buffer.
+        *
+        * reply_mutex protects the reply being built up to return to
+        * usermode.  It nests inside msgbuffer_mutex but may be held
+        * alone during a watch callback.
+        */
+       struct mutex msgbuffer_mutex;
+
+       /* In-progress transactions */
+       struct list_head transactions;
+
+       /* Active watches. */
+       struct list_head watches;
+
+       /* Partial request. */
+       unsigned int len;
+       union {
+               struct xsd_sockmsg msg;
+               char buffer[XENSTORE_PAYLOAD_MAX];
+       } u;
+
+       /* Response queue. */
+       struct mutex reply_mutex;
+       struct list_head read_buffers;
+       wait_queue_head_t read_waitq;
+
+};
+
+/* Read out any raw xenbus messages queued up. */
+static ssize_t xenbus_file_read(struct file *filp,
+                              char __user *ubuf,
+                              size_t len, loff_t *ppos)
+{
+       struct xenbus_file_priv *u = filp->private_data;
+       struct read_buffer *rb;
+       unsigned i;
+       int ret;
+
+       mutex_lock(&u->reply_mutex);
+again:
+       while (list_empty(&u->read_buffers)) {
+               mutex_unlock(&u->reply_mutex);
+               if (filp->f_flags & O_NONBLOCK)
+                       return -EAGAIN;
+
+               ret = wait_event_interruptible(u->read_waitq,
+                                              !list_empty(&u->read_buffers));
+               if (ret)
+                       return ret;
+               mutex_lock(&u->reply_mutex);
+       }
+
+       rb = list_entry(u->read_buffers.next, struct read_buffer, list);
+       i = 0;
+       while (i < len) {
+               unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+
+               ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
+
+               i += sz - ret;
+               rb->cons += sz - ret;
+
+               if (ret != 0) {
+                       if (i == 0)
+                               i = -EFAULT;
+                       goto out;
+               }
+
+               /* Clear out buffer if it has been consumed */
+               if (rb->cons == rb->len) {
+                       list_del(&rb->list);
+                       kfree(rb);
+                       if (list_empty(&u->read_buffers))
+                               break;
+                       rb = list_entry(u->read_buffers.next,
+                                       struct read_buffer, list);
+               }
+       }
+       if (i == 0)
+               goto again;
+
+out:
+       mutex_unlock(&u->reply_mutex);
+       return i;
+}
+
+/*
+ * Add a buffer to the queue.  Caller must hold the appropriate lock
+ * if the queue is not local.  (Commonly the caller will build up
+ * multiple queued buffers on a temporary local list, and then add it
+ * to the appropriate list under lock once all the buffers have een
+ * successfully allocated.)
+ */
+static int queue_reply(struct list_head *queue, const void *data, size_t len)
+{
+       struct read_buffer *rb;
+
+       if (len == 0)
+               return 0;
+
+       rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
+       if (rb == NULL)
+               return -ENOMEM;
+
+       rb->cons = 0;
+       rb->len = len;
+
+       memcpy(rb->msg, data, len);
+
+       list_add_tail(&rb->list, queue);
+       return 0;
+}
+
+/*
+ * Free all the read_buffer s on a list.
+ * Caller must have sole reference to list.
+ */
+static void queue_cleanup(struct list_head *list)
+{
+       struct read_buffer *rb;
+
+       while (!list_empty(list)) {
+               rb = list_entry(list->next, struct read_buffer, list);
+               list_del(list->next);
+               kfree(rb);
+       }
+}
+
+struct watch_adapter {
+       struct list_head list;
+       struct xenbus_watch watch;
+       struct xenbus_file_priv *dev_data;
+       char *token;
+};
+
+static void free_watch_adapter(struct watch_adapter *watch)
+{
+       kfree(watch->watch.node);
+       kfree(watch->token);
+       kfree(watch);
+}
+
+static struct watch_adapter *alloc_watch_adapter(const char *path,
+                                                const char *token)
+{
+       struct watch_adapter *watch;
+
+       watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+       if (watch == NULL)
+               goto out_fail;
+
+       watch->watch.node = kstrdup(path, GFP_KERNEL);
+       if (watch->watch.node == NULL)
+               goto out_free;
+
+       watch->token = kstrdup(token, GFP_KERNEL);
+       if (watch->token == NULL)
+               goto out_free;
+
+       return watch;
+
+out_free:
+       free_watch_adapter(watch);
+
+out_fail:
+       return NULL;
+}
+
+static void watch_fired(struct xenbus_watch *watch,
+                       const char **vec,
+                       unsigned int len)
+{
+       struct watch_adapter *adap;
+       struct xsd_sockmsg hdr;
+       const char *path, *token;
+       int path_len, tok_len, body_len, data_len = 0;
+       int ret;
+       LIST_HEAD(staging_q);
+
+       adap = container_of(watch, struct watch_adapter, watch);
+
+       path = vec[XS_WATCH_PATH];
+       token = adap->token;
+
+       path_len = strlen(path) + 1;
+       tok_len = strlen(token) + 1;
+       if (len > 2)
+               data_len = vec[len] - vec[2] + 1;
+       body_len = path_len + tok_len + data_len;
+
+       hdr.type = XS_WATCH_EVENT;
+       hdr.len = body_len;
+
+       mutex_lock(&adap->dev_data->reply_mutex);
+
+       ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
+       if (!ret)
+               ret = queue_reply(&staging_q, path, path_len);
+       if (!ret)
+               ret = queue_reply(&staging_q, token, tok_len);
+       if (!ret && len > 2)
+               ret = queue_reply(&staging_q, vec[2], data_len);
+
+       if (!ret) {
+               /* success: pass reply list onto watcher */
+               list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
+               wake_up(&adap->dev_data->read_waitq);
+       } else
+               queue_cleanup(&staging_q);
+
+       mutex_unlock(&adap->dev_data->reply_mutex);
+}
+
+static int xenbus_write_transaction(unsigned msg_type,
+                                   struct xenbus_file_priv *u)
+{
+       int rc;
+       void *reply;
+       struct xenbus_transaction_holder *trans = NULL;
+       LIST_HEAD(staging_q);
+
+       if (msg_type == XS_TRANSACTION_START) {
+               trans = kmalloc(sizeof(*trans), GFP_KERNEL);
+               if (!trans) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       reply = xenbus_dev_request_and_reply(&u->u.msg);
+       if (IS_ERR(reply)) {
+               kfree(trans);
+               rc = PTR_ERR(reply);
+               goto out;
+       }
+
+       if (msg_type == XS_TRANSACTION_START) {
+               trans->handle.id = simple_strtoul(reply, NULL, 0);
+
+               list_add(&trans->list, &u->transactions);
+       } else if (msg_type == XS_TRANSACTION_END) {
+               list_for_each_entry(trans, &u->transactions, list)
+                       if (trans->handle.id == u->u.msg.tx_id)
+                               break;
+               BUG_ON(&trans->list == &u->transactions);
+               list_del(&trans->list);
+
+               kfree(trans);
+       }
+
+       mutex_lock(&u->reply_mutex);
+       rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+       if (!rc)
+               rc = queue_reply(&staging_q, reply, u->u.msg.len);
+       if (!rc) {
+               list_splice_tail(&staging_q, &u->read_buffers);
+               wake_up(&u->read_waitq);
+       } else {
+               queue_cleanup(&staging_q);
+       }
+       mutex_unlock(&u->reply_mutex);
+
+       kfree(reply);
+
+out:
+       return rc;
+}
+
+static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
+{
+       struct watch_adapter *watch, *tmp_watch;
+       char *path, *token;
+       int err, rc;
+       LIST_HEAD(staging_q);
+
+       path = u->u.buffer + sizeof(u->u.msg);
+       token = memchr(path, 0, u->u.msg.len);
+       if (token == NULL) {
+               rc = -EILSEQ;
+               goto out;
+       }
+       token++;
+
+       if (msg_type == XS_WATCH) {
+               watch = alloc_watch_adapter(path, token);
+               if (watch == NULL) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               watch->watch.callback = watch_fired;
+               watch->dev_data = u;
+
+               err = register_xenbus_watch(&watch->watch);
+               if (err) {
+                       free_watch_adapter(watch);
+                       rc = err;
+                       goto out;
+               }
+               list_add(&watch->list, &u->watches);
+       } else {
+               list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+                       if (!strcmp(watch->token, token) &&
+                           !strcmp(watch->watch.node, path)) {
+                               unregister_xenbus_watch(&watch->watch);
+                               list_del(&watch->list);
+                               free_watch_adapter(watch);
+                               break;
+                       }
+               }
+       }
+
+       /* Success.  Synthesize a reply to say all is OK. */
+       {
+               struct {
+                       struct xsd_sockmsg hdr;
+                       char body[3];
+               } __packed reply = {
+                       {
+                               .type = msg_type,
+                               .len = sizeof(reply.body)
+                       },
+                       "OK"
+               };
+
+               mutex_lock(&u->reply_mutex);
+               rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+               wake_up(&u->read_waitq);
+               mutex_unlock(&u->reply_mutex);
+       }
+
+out:
+       return rc;
+}
+
+static ssize_t xenbus_file_write(struct file *filp,
+                               const char __user *ubuf,
+                               size_t len, loff_t *ppos)
+{
+       struct xenbus_file_priv *u = filp->private_data;
+       uint32_t msg_type;
+       int rc = len;
+       int ret;
+       LIST_HEAD(staging_q);
+
+       /*
+        * We're expecting usermode to be writing properly formed
+        * xenbus messages.  If they write an incomplete message we
+        * buffer it up.  Once it is complete, we act on it.
+        */
+
+       /*
+        * Make sure concurrent writers can't stomp all over each
+        * other's messages and make a mess of our partial message
+        * buffer.  We don't make any attemppt to stop multiple
+        * writers from making a mess of each other's incomplete
+        * messages; we're just trying to guarantee our own internal
+        * consistency and make sure that single writes are handled
+        * atomically.
+        */
+       mutex_lock(&u->msgbuffer_mutex);
+
+       /* Get this out of the way early to avoid confusion */
+       if (len == 0)
+               goto out;
+
+       /* Can't write a xenbus message larger we can buffer */
+       if ((len + u->len) > sizeof(u->u.buffer)) {
+               /* On error, dump existing buffer */
+               u->len = 0;
+               rc = -EINVAL;
+               goto out;
+       }
+
+       ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
+
+       if (ret != 0) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       /* Deal with a partial copy. */
+       len -= ret;
+       rc = len;
+
+       u->len += len;
+
+       /* Return if we haven't got a full message yet */
+       if (u->len < sizeof(u->u.msg))
+               goto out;       /* not even the header yet */
+
+       /* If we're expecting a message that's larger than we can
+          possibly send, dump what we have and return an error. */
+       if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
+               rc = -E2BIG;
+               u->len = 0;
+               goto out;
+       }
+
+       if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
+               goto out;       /* incomplete data portion */
+
+       /*
+        * OK, now we have a complete message.  Do something with it.
+        */
+
+       msg_type = u->u.msg.type;
+
+       switch (msg_type) {
+       case XS_WATCH:
+       case XS_UNWATCH:
+               /* (Un)Ask for some path to be watched for changes */
+               ret = xenbus_write_watch(msg_type, u);
+               break;
+
+       default:
+               /* Send out a transaction */
+               ret = xenbus_write_transaction(msg_type, u);
+               break;
+       }
+       if (ret != 0)
+               rc = ret;
+
+       /* Buffered message consumed */
+       u->len = 0;
+
+ out:
+       mutex_unlock(&u->msgbuffer_mutex);
+       return rc;
+}
+
+static int xenbus_file_open(struct inode *inode, struct file *filp)
+{
+       struct xenbus_file_priv *u;
+
+       if (xen_store_evtchn == 0)
+               return -ENOENT;
+
+       nonseekable_open(inode, filp);
+
+       u = kzalloc(sizeof(*u), GFP_KERNEL);
+       if (u == NULL)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&u->transactions);
+       INIT_LIST_HEAD(&u->watches);
+       INIT_LIST_HEAD(&u->read_buffers);
+       init_waitqueue_head(&u->read_waitq);
+
+       mutex_init(&u->reply_mutex);
+       mutex_init(&u->msgbuffer_mutex);
+
+       filp->private_data = u;
+
+       return 0;
+}
+
+static int xenbus_file_release(struct inode *inode, struct file *filp)
+{
+       struct xenbus_file_priv *u = filp->private_data;
+       struct xenbus_transaction_holder *trans, *tmp;
+       struct watch_adapter *watch, *tmp_watch;
+       struct read_buffer *rb, *tmp_rb;
+
+       /*
+        * No need for locking here because there are no other users,
+        * by definition.
+        */
+
+       list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
+               xenbus_transaction_end(trans->handle, 1);
+               list_del(&trans->list);
+               kfree(trans);
+       }
+
+       list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+               unregister_xenbus_watch(&watch->watch);
+               list_del(&watch->list);
+               free_watch_adapter(watch);
+       }
+
+       list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+               list_del(&rb->list);
+               kfree(rb);
+       }
+       kfree(u);
+
+       return 0;
+}
+
+static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+{
+       struct xenbus_file_priv *u = file->private_data;
+
+       poll_wait(file, &u->read_waitq, wait);
+       if (!list_empty(&u->read_buffers))
+               return POLLIN | POLLRDNORM;
+       return 0;
+}
+
+const struct file_operations xen_xenbus_fops = {
+       .read = xenbus_file_read,
+       .write = xenbus_file_write,
+       .open = xenbus_file_open,
+       .release = xenbus_file_release,
+       .poll = xenbus_file_poll,
+       .llseek = no_llseek,
+};
+EXPORT_SYMBOL_GPL(xen_xenbus_fops);
+
+static struct miscdevice xenbus_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/xenbus",
+       .fops = &xen_xenbus_fops,
+};
+
+static int __init xenbus_init(void)
+{
+       int err;
+
+       if (!xen_domain())
+               return -ENODEV;
+
+       err = misc_register(&xenbus_dev);
+       if (err)
+               printk(KERN_ERR "Could not register xenbus frontend device\n");
+       return err;
+}
+
+static void __exit xenbus_exit(void)
+{
+       misc_deregister(&xenbus_dev);
+}
+
+module_init(xenbus_init);
+module_exit(xenbus_exit);
index 1b178c6e893796c2807d8a5a1992c9cc27a7b97a..3864967202b56aafc1fcc49fb58ea90fcda4f814 100644 (file)
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev)
 EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
 
 int xenbus_register_driver_common(struct xenbus_driver *drv,
-                                 struct xen_bus_type *bus,
-                                 struct module *owner,
-                                 const char *mod_name)
+                                 struct xen_bus_type *bus)
 {
-       drv->driver.name = drv->name;
        drv->driver.bus = &bus->bus;
-       drv->driver.owner = owner;
-       drv->driver.mod_name = mod_name;
 
        return driver_register(&drv->driver);
 }
@@ -730,6 +725,8 @@ static int __init xenbus_init(void)
        if (!xen_domain())
                return -ENODEV;
 
+       xenbus_ring_ops_init();
+
        if (xen_hvm_domain()) {
                uint64_t v = 0;
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
index 9b1de4e34c64db2eae45798765bf81f45af1a84b..bb4f92ed87305452a3f2a5ad8a25293c75da352b 100644 (file)
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
 extern int xenbus_dev_probe(struct device *_dev);
 extern int xenbus_dev_remove(struct device *_dev);
 extern int xenbus_register_driver_common(struct xenbus_driver *drv,
-                                        struct xen_bus_type *bus,
-                                        struct module *owner,
-                                        const char *mod_name);
+                                        struct xen_bus_type *bus);
 extern int xenbus_probe_node(struct xen_bus_type *bus,
                             const char *type,
                             const char *nodename);
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
 extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
                                        char *id_node, char *path_node);
 
+void xenbus_ring_ops_init(void);
+
 #endif
index c3c7cd195c115026e3c4c495f8f7d88a3526d1e9..257be37d90911d0721de9c134c0af93639519ad4 100644 (file)
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
 
-int __xenbus_register_backend(struct xenbus_driver *drv,
-                             struct module *owner, const char *mod_name)
+int xenbus_register_backend(struct xenbus_driver *drv)
 {
        drv->read_otherend_details = read_frontend_details;
 
-       return xenbus_register_driver_common(drv, &xenbus_backend,
-                                            owner, mod_name);
+       return xenbus_register_driver_common(drv, &xenbus_backend);
 }
-EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+EXPORT_SYMBOL_GPL(xenbus_register_backend);
 
 static int backend_probe_and_watch(struct notifier_block *notifier,
                                   unsigned long event,
index 2f73195512b4dbddad51176cee02e358f1f024b0..9c57819df51ae6fbd10de6c78c5f5916cee6a9f0 100644 (file)
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
                         print_device_status);
 }
 
-int __xenbus_register_frontend(struct xenbus_driver *drv,
-                              struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
 {
        int ret;
 
        drv->read_otherend_details = read_backend_details;
 
-       ret = xenbus_register_driver_common(drv, &xenbus_frontend,
-                                           owner, mod_name);
+       ret = xenbus_register_driver_common(drv, &xenbus_frontend);
        if (ret)
                return ret;
 
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
 
 static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
 static int backend_state;
index ede860f921df847e3c94a2abda905949737c4e3d..d1c217b23a428ee5139727860243aec9a40ff8e8 100644 (file)
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t,
 {
        va_list ap;
        int ret;
-#define PRINTF_BUFFER_SIZE 4096
-       char *printf_buffer;
-
-       printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
-       if (printf_buffer == NULL)
-               return -ENOMEM;
+       char *buf;
 
        va_start(ap, fmt);
-       ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+       buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
        va_end(ap);
 
-       BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
-       ret = xenbus_write(t, dir, node, printf_buffer);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = xenbus_write(t, dir, node, buf);
 
-       kfree(printf_buffer);
+       kfree(buf);
 
        return ret;
 }
@@ -801,6 +798,12 @@ static int process_msg(void)
                goto out;
        }
 
+       if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+               kfree(msg);
+               err = -EINVAL;
+               goto out;
+       }
+
        body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
        if (body == NULL) {
                kfree(msg);
index 4fde9440fe1f453aad9a4d6e4a98dceecbf6dbe4..b019865fcc56b779098c792bc7653cdc2537f2bc 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_XENFS) += xenfs.o
 
-xenfs-y                          = super.o xenbus.o privcmd.o
+xenfs-y                          = super.o
 xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
deleted file mode 100644 (file)
index dbd3b16..0000000
+++ /dev/null
@@ -1,400 +0,0 @@
-/******************************************************************************
- * privcmd.c
- *
- * Interface to privileged domain-0 commands.
- *
- * Copyright (c) 2002-2004, K A Fraser, B Dragovic
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/uaccess.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-#include <asm/xen/hypervisor.h>
-#include <asm/xen/hypercall.h>
-
-#include <xen/xen.h>
-#include <xen/privcmd.h>
-#include <xen/interface/xen.h>
-#include <xen/features.h>
-#include <xen/page.h>
-#include <xen/xen-ops.h>
-
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-#endif
-
-static long privcmd_ioctl_hypercall(void __user *udata)
-{
-       struct privcmd_hypercall hypercall;
-       long ret;
-
-       if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
-               return -EFAULT;
-
-       ret = privcmd_call(hypercall.op,
-                          hypercall.arg[0], hypercall.arg[1],
-                          hypercall.arg[2], hypercall.arg[3],
-                          hypercall.arg[4]);
-
-       return ret;
-}
-
-static void free_page_list(struct list_head *pages)
-{
-       struct page *p, *n;
-
-       list_for_each_entry_safe(p, n, pages, lru)
-               __free_page(p);
-
-       INIT_LIST_HEAD(pages);
-}
-
-/*
- * Given an array of items in userspace, return a list of pages
- * containing the data.  If copying fails, either because of memory
- * allocation failure or a problem reading user memory, return an
- * error code; its up to the caller to dispose of any partial list.
- */
-static int gather_array(struct list_head *pagelist,
-                       unsigned nelem, size_t size,
-                       void __user *data)
-{
-       unsigned pageidx;
-       void *pagedata;
-       int ret;
-
-       if (size > PAGE_SIZE)
-               return 0;
-
-       pageidx = PAGE_SIZE;
-       pagedata = NULL;        /* quiet, gcc */
-       while (nelem--) {
-               if (pageidx > PAGE_SIZE-size) {
-                       struct page *page = alloc_page(GFP_KERNEL);
-
-                       ret = -ENOMEM;
-                       if (page == NULL)
-                               goto fail;
-
-                       pagedata = page_address(page);
-
-                       list_add_tail(&page->lru, pagelist);
-                       pageidx = 0;
-               }
-
-               ret = -EFAULT;
-               if (copy_from_user(pagedata + pageidx, data, size))
-                       goto fail;
-
-               data += size;
-               pageidx += size;
-       }
-
-       ret = 0;
-
-fail:
-       return ret;
-}
-
-/*
- * Call function "fn" on each element of the array fragmented
- * over a list of pages.
- */
-static int traverse_pages(unsigned nelem, size_t size,
-                         struct list_head *pos,
-                         int (*fn)(void *data, void *state),
-                         void *state)
-{
-       void *pagedata;
-       unsigned pageidx;
-       int ret = 0;
-
-       BUG_ON(size > PAGE_SIZE);
-
-       pageidx = PAGE_SIZE;
-       pagedata = NULL;        /* hush, gcc */
-
-       while (nelem--) {
-               if (pageidx > PAGE_SIZE-size) {
-                       struct page *page;
-                       pos = pos->next;
-                       page = list_entry(pos, struct page, lru);
-                       pagedata = page_address(page);
-                       pageidx = 0;
-               }
-
-               ret = (*fn)(pagedata + pageidx, state);
-               if (ret)
-                       break;
-               pageidx += size;
-       }
-
-       return ret;
-}
-
-struct mmap_mfn_state {
-       unsigned long va;
-       struct vm_area_struct *vma;
-       domid_t domain;
-};
-
-static int mmap_mfn_range(void *data, void *state)
-{
-       struct privcmd_mmap_entry *msg = data;
-       struct mmap_mfn_state *st = state;
-       struct vm_area_struct *vma = st->vma;
-       int rc;
-
-       /* Do not allow range to wrap the address space. */
-       if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
-           ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
-               return -EINVAL;
-
-       /* Range chunks must be contiguous in va space. */
-       if ((msg->va != st->va) ||
-           ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
-               return -EINVAL;
-
-       rc = xen_remap_domain_mfn_range(vma,
-                                       msg->va & PAGE_MASK,
-                                       msg->mfn, msg->npages,
-                                       vma->vm_page_prot,
-                                       st->domain);
-       if (rc < 0)
-               return rc;
-
-       st->va += msg->npages << PAGE_SHIFT;
-
-       return 0;
-}
-
-static long privcmd_ioctl_mmap(void __user *udata)
-{
-       struct privcmd_mmap mmapcmd;
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       int rc;
-       LIST_HEAD(pagelist);
-       struct mmap_mfn_state state;
-
-       if (!xen_initial_domain())
-               return -EPERM;
-
-       if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
-               return -EFAULT;
-
-       rc = gather_array(&pagelist,
-                         mmapcmd.num, sizeof(struct privcmd_mmap_entry),
-                         mmapcmd.entry);
-
-       if (rc || list_empty(&pagelist))
-               goto out;
-
-       down_write(&mm->mmap_sem);
-
-       {
-               struct page *page = list_first_entry(&pagelist,
-                                                    struct page, lru);
-               struct privcmd_mmap_entry *msg = page_address(page);
-
-               vma = find_vma(mm, msg->va);
-               rc = -EINVAL;
-
-               if (!vma || (msg->va != vma->vm_start) ||
-                   !privcmd_enforce_singleshot_mapping(vma))
-                       goto out_up;
-       }
-
-       state.va = vma->vm_start;
-       state.vma = vma;
-       state.domain = mmapcmd.dom;
-
-       rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
-                           &pagelist,
-                           mmap_mfn_range, &state);
-
-
-out_up:
-       up_write(&mm->mmap_sem);
-
-out:
-       free_page_list(&pagelist);
-
-       return rc;
-}
-
-struct mmap_batch_state {
-       domid_t domain;
-       unsigned long va;
-       struct vm_area_struct *vma;
-       int err;
-
-       xen_pfn_t __user *user;
-};
-
-static int mmap_batch_fn(void *data, void *state)
-{
-       xen_pfn_t *mfnp = data;
-       struct mmap_batch_state *st = state;
-
-       if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                      st->vma->vm_page_prot, st->domain) < 0) {
-               *mfnp |= 0xf0000000U;
-               st->err++;
-       }
-       st->va += PAGE_SIZE;
-
-       return 0;
-}
-
-static int mmap_return_errors(void *data, void *state)
-{
-       xen_pfn_t *mfnp = data;
-       struct mmap_batch_state *st = state;
-
-       return put_user(*mfnp, st->user++);
-}
-
-static struct vm_operations_struct privcmd_vm_ops;
-
-static long privcmd_ioctl_mmap_batch(void __user *udata)
-{
-       int ret;
-       struct privcmd_mmapbatch m;
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long nr_pages;
-       LIST_HEAD(pagelist);
-       struct mmap_batch_state state;
-
-       if (!xen_initial_domain())
-               return -EPERM;
-
-       if (copy_from_user(&m, udata, sizeof(m)))
-               return -EFAULT;
-
-       nr_pages = m.num;
-       if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
-               return -EINVAL;
-
-       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
-                          m.arr);
-
-       if (ret || list_empty(&pagelist))
-               goto out;
-
-       down_write(&mm->mmap_sem);
-
-       vma = find_vma(mm, m.addr);
-       ret = -EINVAL;
-       if (!vma ||
-           vma->vm_ops != &privcmd_vm_ops ||
-           (m.addr != vma->vm_start) ||
-           ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
-           !privcmd_enforce_singleshot_mapping(vma)) {
-               up_write(&mm->mmap_sem);
-               goto out;
-       }
-
-       state.domain = m.dom;
-       state.vma = vma;
-       state.va = m.addr;
-       state.err = 0;
-
-       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                            &pagelist, mmap_batch_fn, &state);
-
-       up_write(&mm->mmap_sem);
-
-       if (state.err > 0) {
-               state.user = m.arr;
-               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                              &pagelist,
-                              mmap_return_errors, &state);
-       }
-
-out:
-       free_page_list(&pagelist);
-
-       return ret;
-}
-
-static long privcmd_ioctl(struct file *file,
-                         unsigned int cmd, unsigned long data)
-{
-       int ret = -ENOSYS;
-       void __user *udata = (void __user *) data;
-
-       switch (cmd) {
-       case IOCTL_PRIVCMD_HYPERCALL:
-               ret = privcmd_ioctl_hypercall(udata);
-               break;
-
-       case IOCTL_PRIVCMD_MMAP:
-               ret = privcmd_ioctl_mmap(udata);
-               break;
-
-       case IOCTL_PRIVCMD_MMAPBATCH:
-               ret = privcmd_ioctl_mmap_batch(udata);
-               break;
-
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
-              vma, vma->vm_start, vma->vm_end,
-              vmf->pgoff, vmf->virtual_address);
-
-       return VM_FAULT_SIGBUS;
-}
-
-static struct vm_operations_struct privcmd_vm_ops = {
-       .fault = privcmd_fault
-};
-
-static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       /* Unsupported for auto-translate guests. */
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return -ENOSYS;
-
-       /* DONTCOPY is essential for Xen because copy_page_range doesn't know
-        * how to recreate these mappings */
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
-       vma->vm_ops = &privcmd_vm_ops;
-       vma->vm_private_data = NULL;
-
-       return 0;
-}
-
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
-{
-       return (xchg(&vma->vm_private_data, (void *)1) == NULL);
-}
-#endif
-
-const struct file_operations privcmd_file_ops = {
-       .unlocked_ioctl = privcmd_ioctl,
-       .mmap = privcmd_mmap,
-};
index 1aa3897198462112a3bc1db479986653cf0a716f..a84b53c01436334b68688dca58e75bd54462b023 100644 (file)
@@ -16,6 +16,8 @@
 #include <xen/xen.h>
 
 #include "xenfs.h"
+#include "../privcmd.h"
+#include "../xenbus/xenbus_comms.h"
 
 #include <asm/xen/hypervisor.h>
 
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
 {
        static struct tree_descr xenfs_files[] = {
                [1] = {},
-               { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+               { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
                { "capabilities", &capabilities_file_ops, S_IRUGO },
-               { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
+               { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
                {""},
        };
        int rc;
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
deleted file mode 100644 (file)
index bbd000f..0000000
+++ /dev/null
@@ -1,593 +0,0 @@
-/*
- * Driver giving user-space access to the kernel's xenbus connection
- * to xenstore.
- *
- * Copyright (c) 2005, Christian Limpach
- * Copyright (c) 2005, Rusty Russell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Changes:
- * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
- *                              and /proc/xen compatibility mount point.
- *                              Turned xenfs into a loadable module.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/uio.h>
-#include <linux/notifier.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-
-#include "xenfs.h"
-#include "../xenbus/xenbus_comms.h"
-
-#include <xen/xenbus.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * An element of a list of outstanding transactions, for which we're
- * still waiting a reply.
- */
-struct xenbus_transaction_holder {
-       struct list_head list;
-       struct xenbus_transaction handle;
-};
-
-/*
- * A buffer of data on the queue.
- */
-struct read_buffer {
-       struct list_head list;
-       unsigned int cons;
-       unsigned int len;
-       char msg[];
-};
-
-struct xenbus_file_priv {
-       /*
-        * msgbuffer_mutex is held while partial requests are built up
-        * and complete requests are acted on.  It therefore protects
-        * the "transactions" and "watches" lists, and the partial
-        * request length and buffer.
-        *
-        * reply_mutex protects the reply being built up to return to
-        * usermode.  It nests inside msgbuffer_mutex but may be held
-        * alone during a watch callback.
-        */
-       struct mutex msgbuffer_mutex;
-
-       /* In-progress transactions */
-       struct list_head transactions;
-
-       /* Active watches. */
-       struct list_head watches;
-
-       /* Partial request. */
-       unsigned int len;
-       union {
-               struct xsd_sockmsg msg;
-               char buffer[PAGE_SIZE];
-       } u;
-
-       /* Response queue. */
-       struct mutex reply_mutex;
-       struct list_head read_buffers;
-       wait_queue_head_t read_waitq;
-
-};
-
-/* Read out any raw xenbus messages queued up. */
-static ssize_t xenbus_file_read(struct file *filp,
-                              char __user *ubuf,
-                              size_t len, loff_t *ppos)
-{
-       struct xenbus_file_priv *u = filp->private_data;
-       struct read_buffer *rb;
-       unsigned i;
-       int ret;
-
-       mutex_lock(&u->reply_mutex);
-again:
-       while (list_empty(&u->read_buffers)) {
-               mutex_unlock(&u->reply_mutex);
-               if (filp->f_flags & O_NONBLOCK)
-                       return -EAGAIN;
-
-               ret = wait_event_interruptible(u->read_waitq,
-                                              !list_empty(&u->read_buffers));
-               if (ret)
-                       return ret;
-               mutex_lock(&u->reply_mutex);
-       }
-
-       rb = list_entry(u->read_buffers.next, struct read_buffer, list);
-       i = 0;
-       while (i < len) {
-               unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
-
-               ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
-
-               i += sz - ret;
-               rb->cons += sz - ret;
-
-               if (ret != 0) {
-                       if (i == 0)
-                               i = -EFAULT;
-                       goto out;
-               }
-
-               /* Clear out buffer if it has been consumed */
-               if (rb->cons == rb->len) {
-                       list_del(&rb->list);
-                       kfree(rb);
-                       if (list_empty(&u->read_buffers))
-                               break;
-                       rb = list_entry(u->read_buffers.next,
-                                       struct read_buffer, list);
-               }
-       }
-       if (i == 0)
-               goto again;
-
-out:
-       mutex_unlock(&u->reply_mutex);
-       return i;
-}
-
-/*
- * Add a buffer to the queue.  Caller must hold the appropriate lock
- * if the queue is not local.  (Commonly the caller will build up
- * multiple queued buffers on a temporary local list, and then add it
- * to the appropriate list under lock once all the buffers have een
- * successfully allocated.)
- */
-static int queue_reply(struct list_head *queue, const void *data, size_t len)
-{
-       struct read_buffer *rb;
-
-       if (len == 0)
-               return 0;
-
-       rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
-       if (rb == NULL)
-               return -ENOMEM;
-
-       rb->cons = 0;
-       rb->len = len;
-
-       memcpy(rb->msg, data, len);
-
-       list_add_tail(&rb->list, queue);
-       return 0;
-}
-
-/*
- * Free all the read_buffer s on a list.
- * Caller must have sole reference to list.
- */
-static void queue_cleanup(struct list_head *list)
-{
-       struct read_buffer *rb;
-
-       while (!list_empty(list)) {
-               rb = list_entry(list->next, struct read_buffer, list);
-               list_del(list->next);
-               kfree(rb);
-       }
-}
-
-struct watch_adapter {
-       struct list_head list;
-       struct xenbus_watch watch;
-       struct xenbus_file_priv *dev_data;
-       char *token;
-};
-
-static void free_watch_adapter(struct watch_adapter *watch)
-{
-       kfree(watch->watch.node);
-       kfree(watch->token);
-       kfree(watch);
-}
-
-static struct watch_adapter *alloc_watch_adapter(const char *path,
-                                                const char *token)
-{
-       struct watch_adapter *watch;
-
-       watch = kzalloc(sizeof(*watch), GFP_KERNEL);
-       if (watch == NULL)
-               goto out_fail;
-
-       watch->watch.node = kstrdup(path, GFP_KERNEL);
-       if (watch->watch.node == NULL)
-               goto out_free;
-
-       watch->token = kstrdup(token, GFP_KERNEL);
-       if (watch->token == NULL)
-               goto out_free;
-
-       return watch;
-
-out_free:
-       free_watch_adapter(watch);
-
-out_fail:
-       return NULL;
-}
-
-static void watch_fired(struct xenbus_watch *watch,
-                       const char **vec,
-                       unsigned int len)
-{
-       struct watch_adapter *adap;
-       struct xsd_sockmsg hdr;
-       const char *path, *token;
-       int path_len, tok_len, body_len, data_len = 0;
-       int ret;
-       LIST_HEAD(staging_q);
-
-       adap = container_of(watch, struct watch_adapter, watch);
-
-       path = vec[XS_WATCH_PATH];
-       token = adap->token;
-
-       path_len = strlen(path) + 1;
-       tok_len = strlen(token) + 1;
-       if (len > 2)
-               data_len = vec[len] - vec[2] + 1;
-       body_len = path_len + tok_len + data_len;
-
-       hdr.type = XS_WATCH_EVENT;
-       hdr.len = body_len;
-
-       mutex_lock(&adap->dev_data->reply_mutex);
-
-       ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
-       if (!ret)
-               ret = queue_reply(&staging_q, path, path_len);
-       if (!ret)
-               ret = queue_reply(&staging_q, token, tok_len);
-       if (!ret && len > 2)
-               ret = queue_reply(&staging_q, vec[2], data_len);
-
-       if (!ret) {
-               /* success: pass reply list onto watcher */
-               list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
-               wake_up(&adap->dev_data->read_waitq);
-       } else
-               queue_cleanup(&staging_q);
-
-       mutex_unlock(&adap->dev_data->reply_mutex);
-}
-
-static int xenbus_write_transaction(unsigned msg_type,
-                                   struct xenbus_file_priv *u)
-{
-       int rc;
-       void *reply;
-       struct xenbus_transaction_holder *trans = NULL;
-       LIST_HEAD(staging_q);
-
-       if (msg_type == XS_TRANSACTION_START) {
-               trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-               if (!trans) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-       }
-
-       reply = xenbus_dev_request_and_reply(&u->u.msg);
-       if (IS_ERR(reply)) {
-               kfree(trans);
-               rc = PTR_ERR(reply);
-               goto out;
-       }
-
-       if (msg_type == XS_TRANSACTION_START) {
-               trans->handle.id = simple_strtoul(reply, NULL, 0);
-
-               list_add(&trans->list, &u->transactions);
-       } else if (msg_type == XS_TRANSACTION_END) {
-               list_for_each_entry(trans, &u->transactions, list)
-                       if (trans->handle.id == u->u.msg.tx_id)
-                               break;
-               BUG_ON(&trans->list == &u->transactions);
-               list_del(&trans->list);
-
-               kfree(trans);
-       }
-
-       mutex_lock(&u->reply_mutex);
-       rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-       if (!rc)
-               rc = queue_reply(&staging_q, reply, u->u.msg.len);
-       if (!rc) {
-               list_splice_tail(&staging_q, &u->read_buffers);
-               wake_up(&u->read_waitq);
-       } else {
-               queue_cleanup(&staging_q);
-       }
-       mutex_unlock(&u->reply_mutex);
-
-       kfree(reply);
-
-out:
-       return rc;
-}
-
-static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
-{
-       struct watch_adapter *watch, *tmp_watch;
-       char *path, *token;
-       int err, rc;
-       LIST_HEAD(staging_q);
-
-       path = u->u.buffer + sizeof(u->u.msg);
-       token = memchr(path, 0, u->u.msg.len);
-       if (token == NULL) {
-               rc = -EILSEQ;
-               goto out;
-       }
-       token++;
-
-       if (msg_type == XS_WATCH) {
-               watch = alloc_watch_adapter(path, token);
-               if (watch == NULL) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               watch->watch.callback = watch_fired;
-               watch->dev_data = u;
-
-               err = register_xenbus_watch(&watch->watch);
-               if (err) {
-                       free_watch_adapter(watch);
-                       rc = err;
-                       goto out;
-               }
-               list_add(&watch->list, &u->watches);
-       } else {
-               list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
-                       if (!strcmp(watch->token, token) &&
-                           !strcmp(watch->watch.node, path)) {
-                               unregister_xenbus_watch(&watch->watch);
-                               list_del(&watch->list);
-                               free_watch_adapter(watch);
-                               break;
-                       }
-               }
-       }
-
-       /* Success.  Synthesize a reply to say all is OK. */
-       {
-               struct {
-                       struct xsd_sockmsg hdr;
-                       char body[3];
-               } __packed reply = {
-                       {
-                               .type = msg_type,
-                               .len = sizeof(reply.body)
-                       },
-                       "OK"
-               };
-
-               mutex_lock(&u->reply_mutex);
-               rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
-               wake_up(&u->read_waitq);
-               mutex_unlock(&u->reply_mutex);
-       }
-
-out:
-       return rc;
-}
-
-static ssize_t xenbus_file_write(struct file *filp,
-                               const char __user *ubuf,
-                               size_t len, loff_t *ppos)
-{
-       struct xenbus_file_priv *u = filp->private_data;
-       uint32_t msg_type;
-       int rc = len;
-       int ret;
-       LIST_HEAD(staging_q);
-
-       /*
-        * We're expecting usermode to be writing properly formed
-        * xenbus messages.  If they write an incomplete message we
-        * buffer it up.  Once it is complete, we act on it.
-        */
-
-       /*
-        * Make sure concurrent writers can't stomp all over each
-        * other's messages and make a mess of our partial message
-        * buffer.  We don't make any attemppt to stop multiple
-        * writers from making a mess of each other's incomplete
-        * messages; we're just trying to guarantee our own internal
-        * consistency and make sure that single writes are handled
-        * atomically.
-        */
-       mutex_lock(&u->msgbuffer_mutex);
-
-       /* Get this out of the way early to avoid confusion */
-       if (len == 0)
-               goto out;
-
-       /* Can't write a xenbus message larger we can buffer */
-       if ((len + u->len) > sizeof(u->u.buffer)) {
-               /* On error, dump existing buffer */
-               u->len = 0;
-               rc = -EINVAL;
-               goto out;
-       }
-
-       ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
-
-       if (ret != 0) {
-               rc = -EFAULT;
-               goto out;
-       }
-
-       /* Deal with a partial copy. */
-       len -= ret;
-       rc = len;
-
-       u->len += len;
-
-       /* Return if we haven't got a full message yet */
-       if (u->len < sizeof(u->u.msg))
-               goto out;       /* not even the header yet */
-
-       /* If we're expecting a message that's larger than we can
-          possibly send, dump what we have and return an error. */
-       if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
-               rc = -E2BIG;
-               u->len = 0;
-               goto out;
-       }
-
-       if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
-               goto out;       /* incomplete data portion */
-
-       /*
-        * OK, now we have a complete message.  Do something with it.
-        */
-
-       msg_type = u->u.msg.type;
-
-       switch (msg_type) {
-       case XS_WATCH:
-       case XS_UNWATCH:
-               /* (Un)Ask for some path to be watched for changes */
-               ret = xenbus_write_watch(msg_type, u);
-               break;
-
-       default:
-               /* Send out a transaction */
-               ret = xenbus_write_transaction(msg_type, u);
-               break;
-       }
-       if (ret != 0)
-               rc = ret;
-
-       /* Buffered message consumed */
-       u->len = 0;
-
- out:
-       mutex_unlock(&u->msgbuffer_mutex);
-       return rc;
-}
-
-static int xenbus_file_open(struct inode *inode, struct file *filp)
-{
-       struct xenbus_file_priv *u;
-
-       if (xen_store_evtchn == 0)
-               return -ENOENT;
-
-       nonseekable_open(inode, filp);
-
-       u = kzalloc(sizeof(*u), GFP_KERNEL);
-       if (u == NULL)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&u->transactions);
-       INIT_LIST_HEAD(&u->watches);
-       INIT_LIST_HEAD(&u->read_buffers);
-       init_waitqueue_head(&u->read_waitq);
-
-       mutex_init(&u->reply_mutex);
-       mutex_init(&u->msgbuffer_mutex);
-
-       filp->private_data = u;
-
-       return 0;
-}
-
-static int xenbus_file_release(struct inode *inode, struct file *filp)
-{
-       struct xenbus_file_priv *u = filp->private_data;
-       struct xenbus_transaction_holder *trans, *tmp;
-       struct watch_adapter *watch, *tmp_watch;
-       struct read_buffer *rb, *tmp_rb;
-
-       /*
-        * No need for locking here because there are no other users,
-        * by definition.
-        */
-
-       list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-               xenbus_transaction_end(trans->handle, 1);
-               list_del(&trans->list);
-               kfree(trans);
-       }
-
-       list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
-               unregister_xenbus_watch(&watch->watch);
-               list_del(&watch->list);
-               free_watch_adapter(watch);
-       }
-
-       list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
-               list_del(&rb->list);
-               kfree(rb);
-       }
-       kfree(u);
-
-       return 0;
-}
-
-static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
-{
-       struct xenbus_file_priv *u = file->private_data;
-
-       poll_wait(file, &u->read_waitq, wait);
-       if (!list_empty(&u->read_buffers))
-               return POLLIN | POLLRDNORM;
-       return 0;
-}
-
-const struct file_operations xenbus_file_ops = {
-       .read = xenbus_file_read,
-       .write = xenbus_file_write,
-       .open = xenbus_file_open,
-       .release = xenbus_file_release,
-       .poll = xenbus_file_poll,
-       .llseek = no_llseek,
-};
index b68aa6200003575549e7a5844ae7b43b6bec135f..6b80c7779c0217bdddca45d977e5d823df150b5a 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _XENFS_XENBUS_H
 #define _XENFS_XENBUS_H
 
-extern const struct file_operations xenbus_file_ops;
-extern const struct file_operations privcmd_file_ops;
 extern const struct file_operations xsd_kva_file_ops;
 extern const struct file_operations xsd_port_file_ops;
 
index 945aa5f02f9beb9786d8033b3d79d141071cdca0..a9ea73d6dcf311b7703af1225fb71698da2a4fc3 100644 (file)
@@ -62,8 +62,8 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
        uint16_t klen = 0;
 
        v9ses = (struct v9fs_session_info *)cookie_netfs_data;
-       P9_DPRINTK(P9_DEBUG_FSC, "session %p buf %p size %u", v9ses,
-                  buffer, bufmax);
+       p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
+                v9ses, buffer, bufmax);
 
        if (v9ses->cachetag)
                klen = strlen(v9ses->cachetag);
@@ -72,7 +72,7 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
                return 0;
 
        memcpy(buffer, v9ses->cachetag, klen);
-       P9_DPRINTK(P9_DEBUG_FSC, "cache session tag %s", v9ses->cachetag);
+       p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
        return klen;
 }
 
@@ -91,14 +91,14 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
        v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
                                                &v9fs_cache_session_index_def,
                                                v9ses);
-       P9_DPRINTK(P9_DEBUG_FSC, "session %p get cookie %p", v9ses,
-                  v9ses->fscache);
+       p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
+                v9ses, v9ses->fscache);
 }
 
 void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
 {
-       P9_DPRINTK(P9_DEBUG_FSC, "session %p put cookie %p", v9ses,
-                  v9ses->fscache);
+       p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
+                v9ses, v9ses->fscache);
        fscache_relinquish_cookie(v9ses->fscache, 0);
        v9ses->fscache = NULL;
 }
@@ -109,8 +109,8 @@ static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
 {
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
-                  v9inode->qid.path);
+       p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
+                &v9inode->vfs_inode, v9inode->qid.path);
        return sizeof(v9inode->qid.path);
 }
 
@@ -120,8 +120,8 @@ static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        *size = i_size_read(&v9inode->vfs_inode);
 
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &v9inode->vfs_inode,
-                  *size);
+       p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
+                &v9inode->vfs_inode, *size);
 }
 
 static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
@@ -129,8 +129,8 @@ static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
 {
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
-                  v9inode->qid.version);
+       p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
+                &v9inode->vfs_inode, v9inode->qid.version);
        return sizeof(v9inode->qid.version);
 }
 
@@ -206,8 +206,8 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
                                                  &v9fs_cache_inode_index_def,
                                                  v9inode);
 
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
-                  v9inode->fscache);
+       p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
+                inode, v9inode->fscache);
 }
 
 void v9fs_cache_inode_put_cookie(struct inode *inode)
@@ -216,8 +216,8 @@ void v9fs_cache_inode_put_cookie(struct inode *inode)
 
        if (!v9inode->fscache)
                return;
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
-                  v9inode->fscache);
+       p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
+                inode, v9inode->fscache);
 
        fscache_relinquish_cookie(v9inode->fscache, 0);
        v9inode->fscache = NULL;
@@ -229,8 +229,8 @@ void v9fs_cache_inode_flush_cookie(struct inode *inode)
 
        if (!v9inode->fscache)
                return;
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
-                  v9inode->fscache);
+       p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
+                inode, v9inode->fscache);
 
        fscache_relinquish_cookie(v9inode->fscache, 1);
        v9inode->fscache = NULL;
@@ -272,8 +272,8 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
                                                  v9inode);
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
-                  inode, old, v9inode->fscache);
+       p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
+                inode, old, v9inode->fscache);
 
        spin_unlock(&v9inode->fscache_lock);
 }
@@ -323,7 +323,7 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);
 
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+       p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
        if (!v9inode->fscache)
                return -ENOBUFS;
 
@@ -335,13 +335,13 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
        switch (ret) {
        case -ENOBUFS:
        case -ENODATA:
-               P9_DPRINTK(P9_DEBUG_FSC, "page/inode not in cache %d", ret);
+               p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
                return 1;
        case 0:
-               P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+               p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
                return ret;
        default:
-               P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+               p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
                return ret;
        }
 }
@@ -361,7 +361,7 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);
 
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
+       p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
        if (!v9inode->fscache)
                return -ENOBUFS;
 
@@ -373,15 +373,15 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
        switch (ret) {
        case -ENOBUFS:
        case -ENODATA:
-               P9_DPRINTK(P9_DEBUG_FSC, "pages/inodes not in cache %d", ret);
+               p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
                return 1;
        case 0:
                BUG_ON(!list_empty(pages));
                BUG_ON(*nr_pages != 0);
-               P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+               p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
                return ret;
        default:
-               P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+               p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
                return ret;
        }
 }
@@ -396,9 +396,9 @@ void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);
 
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+       p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
        ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
-       P9_DPRINTK(P9_DEBUG_FSC, "ret =  %d", ret);
+       p9_debug(P9_DEBUG_FSC, "ret =  %d\n", ret);
        if (ret != 0)
                v9fs_uncache_page(inode, page);
 }
@@ -409,7 +409,7 @@ void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
 void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
 {
        const struct v9fs_inode *v9inode = V9FS_I(inode);
-       P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+       p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
        if (PageFsCache(page))
                fscache_wait_on_page_write(v9inode->fscache, page);
 }
index 85b67ffa2a43eb184250b1a167eae215deaa0399..da8eefbe830d9a5beb7304109cf4b254a089c555 100644 (file)
@@ -45,8 +45,8 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
 {
        struct v9fs_dentry *dent;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "fid %d dentry %s\n",
-                                       fid->fid, dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "fid %d dentry %s\n",
+                fid->fid, dentry->d_name.name);
 
        dent = dentry->d_fsdata;
        if (!dent) {
@@ -79,8 +79,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
        struct v9fs_dentry *dent;
        struct p9_fid *fid, *ret;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
-               dentry->d_name.name, dentry, uid, any);
+       p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
+                dentry->d_name.name, dentry, uid, any);
        dent = (struct v9fs_dentry *) dentry->d_fsdata;
        ret = NULL;
        if (dent) {
index 2b78014a124a2522007d11e97eda94b3d82ab8af..1964f98e74bedf861d12950dda7f9382ec3f8093 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -85,15 +87,15 @@ static int get_cache_mode(char *s)
 
        if (!strcmp(s, "loose")) {
                version = CACHE_LOOSE;
-               P9_DPRINTK(P9_DEBUG_9P, "Cache mode: loose\n");
+               p9_debug(P9_DEBUG_9P, "Cache mode: loose\n");
        } else if (!strcmp(s, "fscache")) {
                version = CACHE_FSCACHE;
-               P9_DPRINTK(P9_DEBUG_9P, "Cache mode: fscache\n");
+               p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
        } else if (!strcmp(s, "none")) {
                version = CACHE_NONE;
-               P9_DPRINTK(P9_DEBUG_9P, "Cache mode: none\n");
+               p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
        } else
-               printk(KERN_INFO "9p: Unknown Cache mode %s.\n", s);
+               pr_info("Unknown Cache mode %s\n", s);
        return version;
 }
 
@@ -140,8 +142,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                case Opt_debug:
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                          "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                ret = r;
                                continue;
                        }
@@ -154,8 +156,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                case Opt_dfltuid:
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                          "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                ret = r;
                                continue;
                        }
@@ -164,8 +166,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                case Opt_dfltgid:
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                          "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                ret = r;
                                continue;
                        }
@@ -174,8 +176,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                case Opt_afid:
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                          "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                ret = r;
                                continue;
                        }
@@ -205,8 +207,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        s = match_strdup(&args[0]);
                        if (!s) {
                                ret = -ENOMEM;
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                 "problem allocating copy of cache arg\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "problem allocating copy of cache arg\n");
                                goto free_and_return;
                        }
                        ret = get_cache_mode(s);
@@ -223,8 +225,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                        s = match_strdup(&args[0]);
                        if (!s) {
                                ret = -ENOMEM;
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                 "problem allocating copy of access arg\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "problem allocating copy of access arg\n");
                                goto free_and_return;
                        }
 
@@ -240,8 +242,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
                                v9ses->uid = simple_strtoul(s, &e, 10);
                                if (*e != '\0') {
                                        ret = -EINVAL;
-                                       printk(KERN_INFO "9p: Unknown access "
-                                                       "argument %s.\n", s);
+                                       pr_info("Unknown access argument %s\n",
+                                               s);
                                        kfree(s);
                                        goto free_and_return;
                                }
@@ -254,9 +256,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 #ifdef CONFIG_9P_FS_POSIX_ACL
                        v9ses->flags |= V9FS_POSIX_ACL;
 #else
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                                       "Not defined CONFIG_9P_FS_POSIX_ACL. "
-                                       "Ignoring posixacl option\n");
+                       p9_debug(P9_DEBUG_ERROR,
+                                "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
 #endif
                        break;
 
@@ -318,7 +319,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
        if (IS_ERR(v9ses->clnt)) {
                retval = PTR_ERR(v9ses->clnt);
                v9ses->clnt = NULL;
-               P9_DPRINTK(P9_DEBUG_ERROR, "problem initializing 9p client\n");
+               p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n");
                goto error;
        }
 
@@ -371,7 +372,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
        if (IS_ERR(fid)) {
                retval = PTR_ERR(fid);
                fid = NULL;
-               P9_DPRINTK(P9_DEBUG_ERROR, "cannot attach\n");
+               p9_debug(P9_DEBUG_ERROR, "cannot attach\n");
                goto error;
        }
 
@@ -429,7 +430,7 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
  */
 
 void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
-       P9_DPRINTK(P9_DEBUG_ERROR, "cancel session %p\n", v9ses);
+       p9_debug(P9_DEBUG_ERROR, "cancel session %p\n", v9ses);
        p9_client_disconnect(v9ses->clnt);
 }
 
@@ -442,7 +443,7 @@ void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
 
 void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses)
 {
-       P9_DPRINTK(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses);
+       p9_debug(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses);
        p9_client_begin_disconnect(v9ses->clnt);
 }
 
@@ -591,23 +592,23 @@ static void v9fs_cache_unregister(void)
 static int __init init_v9fs(void)
 {
        int err;
-       printk(KERN_INFO "Installing v9fs 9p2000 file system support\n");
+       pr_info("Installing v9fs 9p2000 file system support\n");
        /* TODO: Setup list of registered trasnport modules */
        err = register_filesystem(&v9fs_fs_type);
        if (err < 0) {
-               printk(KERN_ERR "Failed to register filesystem\n");
+               pr_err("Failed to register filesystem\n");
                return err;
        }
 
        err = v9fs_cache_register();
        if (err < 0) {
-               printk(KERN_ERR "Failed to register v9fs for caching\n");
+               pr_err("Failed to register v9fs for caching\n");
                goto out_fs_unreg;
        }
 
        err = v9fs_sysfs_init();
        if (err < 0) {
-               printk(KERN_ERR "Failed to register with sysfs\n");
+               pr_err("Failed to register with sysfs\n");
                goto out_sysfs_cleanup;
        }
 
index 2524e4cbb8ea0801b88b63d315b921dfc9d442c1..0ad61c6a65a5b1964b373432f5e9bf9b031f6d38 100644 (file)
@@ -56,7 +56,7 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
        struct inode *inode;
 
        inode = page->mapping->host;
-       P9_DPRINTK(P9_DEBUG_VFS, "\n");
+       p9_debug(P9_DEBUG_VFS, "\n");
 
        BUG_ON(!PageLocked(page));
 
@@ -116,14 +116,14 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
        struct inode *inode;
 
        inode = mapping->host;
-       P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
+       p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
 
        ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
        if (ret == 0)
                return ret;
 
        ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
-       P9_DPRINTK(P9_DEBUG_VFS, "  = %d\n", ret);
+       p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
        return ret;
 }
 
@@ -263,10 +263,9 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         * Now that we do caching with cache mode enabled, We need
         * to support direct IO
         */
-       P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
-                       "off/no(%lld/%lu) EINVAL\n",
-                       iocb->ki_filp->f_path.dentry->d_name.name,
-                       (long long) pos, nr_segs);
+       p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
+                iocb->ki_filp->f_path.dentry->d_name.name,
+                (long long)pos, nr_segs);
 
        return -EINVAL;
 }
index e022890c6f403283b4da166b7c56e4a27e14b975..d529437ff44269f79cb707592a8e504bf4744cea 100644 (file)
@@ -53,8 +53,8 @@
 
 static int v9fs_dentry_delete(const struct dentry *dentry)
 {
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
-                                                                       dentry);
+       p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+                dentry->d_name.name, dentry);
 
        return 1;
 }
@@ -66,8 +66,8 @@ static int v9fs_dentry_delete(const struct dentry *dentry)
  */
 static int v9fs_cached_dentry_delete(const struct dentry *dentry)
 {
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
-                  dentry->d_name.name, dentry);
+       p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+                dentry->d_name.name, dentry);
 
        /* Don't cache negative dentries */
        if (!dentry->d_inode)
@@ -86,8 +86,8 @@ static void v9fs_dentry_release(struct dentry *dentry)
        struct v9fs_dentry *dent;
        struct p9_fid *temp, *current_fid;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
-                                                                       dentry);
+       p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+                dentry->d_name.name, dentry);
        dent = dentry->d_fsdata;
        if (dent) {
                list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
index 598fff1a54e59f7eafedc2b7c87e77300228b9d6..ff911e779651bcf5726c08ea412bceebd1f9b79b 100644 (file)
@@ -140,7 +140,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
        int reclen = 0;
        struct p9_rdir *rdir;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
        fid = filp->private_data;
 
        buflen = fid->clnt->msize - P9_IOHDRSZ;
@@ -168,7 +168,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
                                          rdir->tail - rdir->head, &st);
                        if (err) {
-                               P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+                               p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
                                err = -EIO;
                                p9stat_free(&st);
                                goto unlock_and_exit;
@@ -213,7 +213,7 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
        struct p9_dirent curdirent;
        u64 oldoffset = 0;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
        fid = filp->private_data;
 
        buflen = fid->clnt->msize - P9_READDIRHDRSZ;
@@ -244,7 +244,7 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
                                            rdir->tail - rdir->head,
                                            &curdirent);
                        if (err < 0) {
-                               P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+                               p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
                                err = -EIO;
                                goto unlock_and_exit;
                        }
@@ -290,9 +290,8 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
        struct p9_fid *fid;
 
        fid = filp->private_data;
-       P9_DPRINTK(P9_DEBUG_VFS,
-                       "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
-                       inode, filp, fid ? fid->fid : -1);
+       p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
+                inode, filp, fid ? fid->fid : -1);
        if (fid)
                p9_client_clunk(fid);
        return 0;
index 62857a810a79d00332a150c6a02a433e638d72b8..fc06fd27065eb3cade2f5e5f60f7155d3487ae80 100644 (file)
@@ -61,7 +61,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        struct p9_fid *fid;
        int omode;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
+       p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
@@ -135,7 +135,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
        int res = 0;
        struct inode *inode = filp->f_path.dentry->d_inode;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
+       p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -204,7 +204,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
                        break;
                if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
                        break;
-               schedule_timeout_interruptible(P9_LOCK_TIMEOUT);
+               if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+                       break;
        }
 
        /* map 9p status to VFS status */
@@ -304,8 +305,8 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
        struct inode *inode = filp->f_path.dentry->d_inode;
        int ret = -ENOLCK;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
-                               cmd, fl, filp->f_path.dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
+                filp, cmd, fl, filp->f_path.dentry->d_name.name);
 
        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -340,8 +341,8 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
        struct inode *inode = filp->f_path.dentry->d_inode;
        int ret = -ENOLCK;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
-                               cmd, fl, filp->f_path.dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
+                filp, cmd, fl, filp->f_path.dentry->d_name.name);
 
        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -384,8 +385,8 @@ v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
 {
        int n, total, size;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
-                  (long long unsigned) offset, count);
+       p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
+                fid->fid, (long long unsigned)offset, count);
        n = 0;
        total = 0;
        size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@@ -443,7 +444,7 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
        struct p9_fid *fid;
        size_t size;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
+       p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
        fid = filp->private_data;
 
        size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@@ -470,8 +471,8 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
        loff_t origin = *offset;
        unsigned long pg_start, pg_end;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
-               (int)count, (int)*offset);
+       p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
+                data, (int)count, (int)*offset);
 
        clnt = fid->clnt;
        do {
@@ -552,7 +553,7 @@ static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
                return retval;
 
        mutex_lock(&inode->i_mutex);
-       P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
+       p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
 
        fid = filp->private_data;
        v9fs_blank_wstat(&wstat);
@@ -575,8 +576,7 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
                return retval;
 
        mutex_lock(&inode->i_mutex);
-       P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n",
-                       filp, datasync);
+       p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
 
        fid = filp->private_data;
 
@@ -607,8 +607,8 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = filp->f_path.dentry->d_inode;
 
 
-       P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
-                  page, (unsigned long)filp->private_data);
+       p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
+                page, (unsigned long)filp->private_data);
 
        v9inode = V9FS_I(inode);
        /* make sure the cache has finished storing the page */
index e0f20de6aa2bc53f2a6b47b7f9926696f301e389..014c8dd62962c02a8acfb17f6a771bc76de1a73a 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -87,6 +89,32 @@ static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode)
        return res;
 }
 
+/**
+ * p9mode2perm- convert plan9 mode bits to unix permission bits
+ * @v9ses: v9fs session information
+ * @stat: p9_wstat from which mode need to be derived
+ *
+ */
+static int p9mode2perm(struct v9fs_session_info *v9ses,
+                      struct p9_wstat *stat)
+{
+       int res;
+       int mode = stat->mode;
+
+       res = mode & S_IALLUGO;
+       if (v9fs_proto_dotu(v9ses)) {
+               if ((mode & P9_DMSETUID) == P9_DMSETUID)
+                       res |= S_ISUID;
+
+               if ((mode & P9_DMSETGID) == P9_DMSETGID)
+                       res |= S_ISGID;
+
+               if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
+                       res |= S_ISVTX;
+       }
+       return res;
+}
+
 /**
  * p9mode2unixmode- convert plan9 mode bits to unix mode bits
  * @v9ses: v9fs session information
@@ -100,8 +128,8 @@ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
        int res;
        u32 mode = stat->mode;
 
-       res = mode & S_IALLUGO;
        *rdev = 0;
+       res = p9mode2perm(v9ses, stat);
 
        if ((mode & P9_DMDIR) == P9_DMDIR)
                res |= S_IFDIR;
@@ -128,24 +156,13 @@ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
                        res |= S_IFBLK;
                        break;
                default:
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "Unknown special type %c %s\n", type,
-                               stat->extension);
+                       p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n",
+                                type, stat->extension);
                };
                *rdev = MKDEV(major, minor);
        } else
                res |= S_IFREG;
 
-       if (v9fs_proto_dotu(v9ses)) {
-               if ((mode & P9_DMSETUID) == P9_DMSETUID)
-                       res |= S_ISUID;
-
-               if ((mode & P9_DMSETGID) == P9_DMSETGID)
-                       res |= S_ISGID;
-
-               if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
-                       res |= S_ISVTX;
-       }
        return res;
 }
 
@@ -275,8 +292,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                } else if (v9fs_proto_dotu(v9ses)) {
                        inode->i_op = &v9fs_file_inode_operations;
                } else {
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                                  "special files without extended mode\n");
+                       p9_debug(P9_DEBUG_ERROR,
+                                "special files without extended mode\n");
                        err = -EINVAL;
                        goto error;
                }
@@ -301,8 +318,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                break;
        case S_IFLNK:
                if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
-                       P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used with "
-                                               "legacy protocol.\n");
+                       p9_debug(P9_DEBUG_ERROR,
+                                "extended modes used with legacy protocol\n");
                        err = -EINVAL;
                        goto error;
                }
@@ -329,8 +346,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
 
                break;
        default:
-               P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n",
-                          mode, mode & S_IFMT);
+               p9_debug(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n",
+                        mode, mode & S_IFMT);
                err = -EINVAL;
                goto error;
        }
@@ -352,11 +369,12 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
        struct inode *inode;
        struct v9fs_session_info *v9ses = sb->s_fs_info;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode);
+       p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode);
 
        inode = new_inode(sb);
        if (!inode) {
-               P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
+               pr_warn("%s (%d): Problem allocating inode\n",
+                       __func__, task_pid_nr(current));
                return ERR_PTR(-ENOMEM);
        }
        err = v9fs_init_inode(v9ses, inode, mode, rdev);
@@ -573,15 +591,15 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
        struct p9_fid *v9fid, *dfid;
        struct v9fs_session_info *v9ses;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
-                  dir, dentry, flags);
+       p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
+                dir, dentry, flags);
 
        v9ses = v9fs_inode2v9ses(dir);
        inode = dentry->d_inode;
        dfid = v9fs_fid_lookup(dentry->d_parent);
        if (IS_ERR(dfid)) {
                retval = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
                return retval;
        }
        if (v9fs_proto_dotl(v9ses))
@@ -630,7 +648,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
        struct p9_fid *dfid, *ofid, *fid;
        struct inode *inode;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
 
        err = 0;
        ofid = NULL;
@@ -639,7 +657,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
        dfid = v9fs_fid_lookup(dentry->d_parent);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
                return ERR_PTR(err);
        }
 
@@ -647,36 +665,41 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
        ofid = p9_client_walk(dfid, 0, NULL, 1);
        if (IS_ERR(ofid)) {
                err = PTR_ERR(ofid);
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
                return ERR_PTR(err);
        }
 
        err = p9_client_fcreate(ofid, name, perm, mode, extension);
        if (err < 0) {
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
-               goto error;
-       }
-
-       /* now walk from the parent so we can get unopened fid */
-       fid = p9_client_walk(dfid, 1, &name, 1);
-       if (IS_ERR(fid)) {
-               err = PTR_ERR(fid);
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
-               fid = NULL;
+               p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
                goto error;
        }
 
-       /* instantiate inode and assign the unopened fid to the dentry */
-       inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
-       if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
-               P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
-               goto error;
+       if (!(perm & P9_DMLINK)) {
+               /* now walk from the parent so we can get unopened fid */
+               fid = p9_client_walk(dfid, 1, &name, 1);
+               if (IS_ERR(fid)) {
+                       err = PTR_ERR(fid);
+                       p9_debug(P9_DEBUG_VFS,
+                                  "p9_client_walk failed %d\n", err);
+                       fid = NULL;
+                       goto error;
+               }
+               /*
+                * instantiate inode and assign the unopened fid to the dentry
+                */
+               inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+               if (IS_ERR(inode)) {
+                       err = PTR_ERR(inode);
+                       p9_debug(P9_DEBUG_VFS,
+                                  "inode creation failed %d\n", err);
+                       goto error;
+               }
+               err = v9fs_fid_add(dentry, fid);
+               if (err < 0)
+                       goto error;
+               d_instantiate(dentry, inode);
        }
-       err = v9fs_fid_add(dentry, fid);
-       if (err < 0)
-               goto error;
-       d_instantiate(dentry, inode);
        return ofid;
 error:
        if (ofid)
@@ -788,7 +811,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        struct p9_fid *fid;
        struct v9fs_session_info *v9ses;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
        err = 0;
        v9ses = v9fs_inode2v9ses(dir);
        perm = unixmode2p9mode(v9ses, mode | S_IFDIR);
@@ -826,8 +849,8 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        char *name;
        int result = 0;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
-               dir, dentry->d_name.name, dentry, nameidata);
+       p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
+                dir, dentry->d_name.name, dentry, nameidata);
 
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
@@ -933,7 +956,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct p9_fid *newdirfid;
        struct p9_wstat wstat;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "\n");
+       p9_debug(P9_DEBUG_VFS, "\n");
        retval = 0;
        old_inode = old_dentry->d_inode;
        new_inode = new_dentry->d_inode;
@@ -969,8 +992,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * 9P .u can only handle file rename in the same directory
                 */
 
-               P9_DPRINTK(P9_DEBUG_ERROR,
-                               "old dir and new dir are different\n");
+               p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n");
                retval = -EXDEV;
                goto clunk_newdir;
        }
@@ -1026,7 +1048,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct p9_fid *fid;
        struct p9_wstat *st;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+       p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
        err = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
@@ -1063,7 +1085,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
        struct p9_fid *fid;
        struct p9_wstat wstat;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "\n");
+       p9_debug(P9_DEBUG_VFS, "\n");
        retval = inode_change_ok(dentry->d_inode, iattr);
        if (retval)
                return retval;
@@ -1162,7 +1184,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                                set_nlink(inode, i_nlink);
                }
        }
-       mode = stat->mode & S_IALLUGO;
+       mode = p9mode2perm(v9ses, stat);
        mode |= inode->i_mode & ~S_IALLUGO;
        inode->i_mode = mode;
        i_size_write(inode, stat->length);
@@ -1208,7 +1230,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
        struct p9_fid *fid;
        struct p9_wstat *st;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
        retval = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
        fid = v9fs_fid_lookup(dentry);
@@ -1230,8 +1252,8 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
        /* copy extension buffer into buffer */
        strncpy(buffer, st->extension, buflen);
 
-       P9_DPRINTK(P9_DEBUG_VFS,
-               "%s -> %s (%s)\n", dentry->d_name.name, st->extension, buffer);
+       p9_debug(P9_DEBUG_VFS, "%s -> %s (%s)\n",
+                dentry->d_name.name, st->extension, buffer);
 
        retval = strnlen(buffer, buflen);
 done:
@@ -1252,7 +1274,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
        int len = 0;
        char *link = __getname();
 
-       P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
 
        if (!link)
                link = ERR_PTR(-ENOMEM);
@@ -1283,8 +1305,8 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
        char *s = nd_get_link(nd);
 
-       P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
-               IS_ERR(s) ? "<error>" : s);
+       p9_debug(P9_DEBUG_VFS, " %s %s\n",
+                dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
        if (!IS_ERR(s))
                __putname(s);
 }
@@ -1306,7 +1328,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
 
        v9ses = v9fs_inode2v9ses(dir);
        if (!v9fs_proto_dotu(v9ses)) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "not extended\n");
+               p9_debug(P9_DEBUG_ERROR, "not extended\n");
                return -EPERM;
        }
 
@@ -1333,8 +1355,8 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
 static int
 v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 {
-       P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino,
-                                       dentry->d_name.name, symname);
+       p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
+                dir->i_ino, dentry->d_name.name, symname);
 
        return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname);
 }
@@ -1355,9 +1377,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
        char *name;
        struct p9_fid *oldfid;
 
-       P9_DPRINTK(P9_DEBUG_VFS,
-               " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
-               old_dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
+                dir->i_ino, dentry->d_name.name, old_dentry->d_name.name);
 
        oldfid = v9fs_fid_clone(old_dentry);
        if (IS_ERR(oldfid))
@@ -1398,9 +1419,9 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rde
        char *name;
        u32 perm;
 
-       P9_DPRINTK(P9_DEBUG_VFS,
-               " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", dir->i_ino,
-               dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
+       p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
+                dir->i_ino, dentry->d_name.name, mode,
+                MAJOR(rdev), MINOR(rdev));
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
index 8ef152ac6a16003bb5b39e947577d22cf4e667cd..a1e6c990cd410efded55c826f03bc5db13839d75 100644 (file)
@@ -283,13 +283,13 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        }
 
        name = (char *) dentry->d_name.name;
-       P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
-                       "mode:0x%hx\n", name, flags, omode);
+       p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
+                name, flags, omode);
 
        dfid = v9fs_fid_lookup(dentry->d_parent);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
                return err;
        }
 
@@ -297,7 +297,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        ofid = p9_client_walk(dfid, 0, NULL, 1);
        if (IS_ERR(ofid)) {
                err = PTR_ERR(ofid);
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
                return err;
        }
 
@@ -307,16 +307,15 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        /* Update mode based on ACL value */
        err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_VFS,
-                          "Failed to get acl values in creat %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
+                        err);
                goto error;
        }
        err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
                                    mode, gid, &qid);
        if (err < 0) {
-               P9_DPRINTK(P9_DEBUG_VFS,
-                               "p9_client_open_dotl failed in creat %d\n",
-                               err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
+                        err);
                goto error;
        }
        v9fs_invalidate_inode_attr(dir);
@@ -325,14 +324,14 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        fid = p9_client_walk(dfid, 1, &name, 1);
        if (IS_ERR(fid)) {
                err = PTR_ERR(fid);
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
                fid = NULL;
                goto error;
        }
        inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
-               P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
        err = v9fs_fid_add(dentry, fid);
@@ -408,7 +407,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        struct dentry *dir_dentry;
        struct posix_acl *dacl = NULL, *pacl = NULL;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
        err = 0;
        v9ses = v9fs_inode2v9ses(dir);
 
@@ -420,7 +419,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
                dfid = NULL;
                goto error;
        }
@@ -430,8 +429,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        /* Update mode based on ACL value */
        err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_VFS,
-                          "Failed to get acl values in mkdir %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n",
+                        err);
                goto error;
        }
        name = (char *) dentry->d_name.name;
@@ -444,8 +443,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                fid = p9_client_walk(dfid, 1, &name, 1);
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
-                       P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-                               err);
+                       p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+                                err);
                        fid = NULL;
                        goto error;
                }
@@ -453,8 +452,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
-                       P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-                               err);
+                       p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+                                err);
                        goto error;
                }
                err = v9fs_fid_add(dentry, fid);
@@ -495,7 +494,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
        struct p9_fid *fid;
        struct p9_stat_dotl *st;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+       p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
        err = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
@@ -523,6 +522,46 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
        return 0;
 }
 
+/*
+ * Attribute flags.
+ */
+#define P9_ATTR_MODE           (1 << 0)
+#define P9_ATTR_UID            (1 << 1)
+#define P9_ATTR_GID            (1 << 2)
+#define P9_ATTR_SIZE           (1 << 3)
+#define P9_ATTR_ATIME          (1 << 4)
+#define P9_ATTR_MTIME          (1 << 5)
+#define P9_ATTR_CTIME          (1 << 6)
+#define P9_ATTR_ATIME_SET      (1 << 7)
+#define P9_ATTR_MTIME_SET      (1 << 8)
+
+struct dotl_iattr_map {
+       int iattr_valid;
+       int p9_iattr_valid;
+};
+
+static int v9fs_mapped_iattr_valid(int iattr_valid)
+{
+       int i;
+       int p9_iattr_valid = 0;
+       struct dotl_iattr_map dotl_iattr_map[] = {
+               { ATTR_MODE,            P9_ATTR_MODE },
+               { ATTR_UID,             P9_ATTR_UID },
+               { ATTR_GID,             P9_ATTR_GID },
+               { ATTR_SIZE,            P9_ATTR_SIZE },
+               { ATTR_ATIME,           P9_ATTR_ATIME },
+               { ATTR_MTIME,           P9_ATTR_MTIME },
+               { ATTR_CTIME,           P9_ATTR_CTIME },
+               { ATTR_ATIME_SET,       P9_ATTR_ATIME_SET },
+               { ATTR_MTIME_SET,       P9_ATTR_MTIME_SET },
+       };
+       for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) {
+               if (iattr_valid & dotl_iattr_map[i].iattr_valid)
+                       p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid;
+       }
+       return p9_iattr_valid;
+}
+
 /**
  * v9fs_vfs_setattr_dotl - set file metadata
  * @dentry: file whose metadata to set
@@ -537,13 +576,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
        struct p9_fid *fid;
        struct p9_iattr_dotl p9attr;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "\n");
+       p9_debug(P9_DEBUG_VFS, "\n");
 
        retval = inode_change_ok(dentry->d_inode, iattr);
        if (retval)
                return retval;
 
-       p9attr.valid = iattr->ia_valid;
+       p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
        p9attr.mode = iattr->ia_mode;
        p9attr.uid = iattr->ia_uid;
        p9attr.gid = iattr->ia_gid;
@@ -670,14 +709,13 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
        struct v9fs_session_info *v9ses;
 
        name = (char *) dentry->d_name.name;
-       P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
-                       dir->i_ino, name, symname);
+       p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
        v9ses = v9fs_inode2v9ses(dir);
 
        dfid = v9fs_fid_lookup(dentry->d_parent);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
                return err;
        }
 
@@ -687,7 +725,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
        err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
 
        if (err < 0) {
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
                goto error;
        }
 
@@ -697,8 +735,8 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                fid = p9_client_walk(dfid, 1, &name, 1);
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
-                       P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-                                       err);
+                       p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+                                err);
                        fid = NULL;
                        goto error;
                }
@@ -707,8 +745,8 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
-                       P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-                                       err);
+                       p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+                                err);
                        goto error;
                }
                err = v9fs_fid_add(dentry, fid);
@@ -751,9 +789,8 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
        struct p9_fid *dfid, *oldfid;
        struct v9fs_session_info *v9ses;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
-                       dir->i_ino, old_dentry->d_name.name,
-                       dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+                dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
 
        v9ses = v9fs_inode2v9ses(dir);
        dir_dentry = v9fs_dentry_from_dir_inode(dir);
@@ -770,7 +807,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
        err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
 
        if (err < 0) {
-               P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
                return err;
        }
 
@@ -813,9 +850,9 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        struct dentry *dir_dentry;
        struct posix_acl *dacl = NULL, *pacl = NULL;
 
-       P9_DPRINTK(P9_DEBUG_VFS,
-               " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", dir->i_ino,
-               dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+       p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
+                dir->i_ino, dentry->d_name.name, omode,
+                MAJOR(rdev), MINOR(rdev));
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -825,7 +862,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
-               P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
                dfid = NULL;
                goto error;
        }
@@ -835,8 +872,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
        /* Update mode based on ACL value */
        err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_VFS,
-                          "Failed to get acl values in mknod %d\n", err);
+               p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n",
+                        err);
                goto error;
        }
        name = (char *) dentry->d_name.name;
@@ -851,8 +888,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                fid = p9_client_walk(dfid, 1, &name, 1);
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
-                       P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-                               err);
+                       p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+                                err);
                        fid = NULL;
                        goto error;
                }
@@ -860,8 +897,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
-                       P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-                               err);
+                       p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+                                err);
                        goto error;
                }
                err = v9fs_fid_add(dentry, fid);
@@ -905,7 +942,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
        char *link = __getname();
        char *target;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+       p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
 
        if (!link) {
                link = ERR_PTR(-ENOMEM);
index f68ff65a32a526cbb5bae7f7b31feec512560bc0..7b0cd87b07c20772b79395bb03afdb2bc5ebf414 100644 (file)
@@ -121,7 +121,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        struct p9_fid *fid;
        int retval = 0;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " \n");
+       p9_debug(P9_DEBUG_VFS, "\n");
 
        v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
        if (!v9ses)
@@ -191,7 +191,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
                goto release_sb;
        v9fs_fid_add(root, fid);
 
-       P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+       p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
        return dget(sb->s_root);
 
 clunk_fid:
@@ -223,7 +223,7 @@ static void v9fs_kill_super(struct super_block *s)
 {
        struct v9fs_session_info *v9ses = s->s_fs_info;
 
-       P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
+       p9_debug(P9_DEBUG_VFS, " %p\n", s);
 
        kill_anon_super(s);
 
@@ -231,7 +231,7 @@ static void v9fs_kill_super(struct super_block *s)
        v9fs_session_close(v9ses);
        kfree(v9ses);
        s->s_fs_info = NULL;
-       P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n");
+       p9_debug(P9_DEBUG_VFS, "exiting kill_super\n");
 }
 
 static void
@@ -303,7 +303,7 @@ static int v9fs_write_inode(struct inode *inode,
         * send an fsync request to server irrespective of
         * wbc->sync_mode.
         */
-       P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+       p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
        v9inode = V9FS_I(inode);
        if (!v9inode->writeback_fid)
                return 0;
@@ -326,7 +326,7 @@ static int v9fs_write_inode_dotl(struct inode *inode,
         * send an fsync request to server irrespective of
         * wbc->sync_mode.
         */
-       P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+       p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
        v9inode = V9FS_I(inode);
        if (!v9inode->writeback_fid)
                return 0;
index d288773871b3aa9c1816096cdd59bc43520d64eb..29653b70a9c345b3d8b0ac8162ade170550c4083 100644 (file)
@@ -32,8 +32,8 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
        attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
        if (IS_ERR(attr_fid)) {
                retval = PTR_ERR(attr_fid);
-               P9_DPRINTK(P9_DEBUG_VFS,
-                       "p9_client_attrwalk failed %zd\n", retval);
+               p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
+                        retval);
                attr_fid = NULL;
                goto error;
        }
@@ -87,8 +87,8 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
 {
        struct p9_fid *fid;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n",
-               __func__, name, buffer_size);
+       p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
+                name, buffer_size);
        fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
                return PTR_ERR(fid);
@@ -115,8 +115,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
        int retval, msize, write_count;
        struct p9_fid *fid = NULL;
 
-       P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu flags = %d\n",
-               __func__, name, value_len, flags);
+       p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
+                name, value_len, flags);
 
        fid = v9fs_fid_clone(dentry);
        if (IS_ERR(fid)) {
@@ -129,8 +129,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
         */
        retval = p9_client_xattrcreate(fid, name, value_len, flags);
        if (retval < 0) {
-               P9_DPRINTK(P9_DEBUG_VFS,
-                       "p9_client_xattrcreate failed %d\n", retval);
+               p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
+                        retval);
                goto error;
        }
        msize = fid->clnt->msize;
index 79e2ca7973b7a2b503a7f7fc168f716a5f6c0284..e95d1b64082cae708f3213a453ea3275f6afbb57 100644 (file)
@@ -27,6 +27,9 @@ config COMPAT_BINFMT_ELF
        bool
        depends on COMPAT && BINFMT_ELF
 
+config ARCH_BINFMT_ELF_RANDOMIZE_PIE
+       bool
+
 config BINFMT_ELF_FDPIC
        bool "Kernel support for FDPIC ELF binaries"
        default y
index 21ac5ee4b43f3e767b56aff2f54c6018175bc6af..bcb884e2d613e76d94570dd81b99ba27e3906a66 100644 (file)
@@ -794,7 +794,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
-#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
                        /* Memory randomization might have been switched off
                         * in runtime via sysctl.
                         * If that is the case, retain the original non-zero
index 97fbe939c050dc7d523baeaed960741c053cbf0d..034d985032296cd4dfbc80e4d6717ac8b4ea77c4 100644 (file)
@@ -1081,7 +1081,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = find_or_create_page(inode->i_mapping, index + i,
-                                              mask);
+                                              mask | __GFP_WRITE);
                if (!pages[i]) {
                        faili = i - 1;
                        err = -ENOMEM;
@@ -1136,7 +1136,8 @@ again:
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
-               clear_page_dirty_for_io(pages[i]);
+               if (clear_page_dirty_for_io(pages[i]))
+                       account_page_redirty(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
index 6cf72fcc0d0c48bfab133529b6405912ed63d745..e7e327d43fa59482f5a247b3b904a076bbfe840f 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/in.h>
 #include <linux/in6.h>
+#include <linux/dlmconstants.h>
 #include <net/ipv6.h>
 #include <net/sock.h>
 
@@ -36,6 +37,7 @@
 static struct config_group *space_list;
 static struct config_group *comm_list;
 static struct dlm_comm *local_comm;
+static uint32_t dlm_comm_count;
 
 struct dlm_clusters;
 struct dlm_cluster;
@@ -103,6 +105,8 @@ struct dlm_cluster {
        unsigned int cl_timewarn_cs;
        unsigned int cl_waitwarn_us;
        unsigned int cl_new_rsb_count;
+       unsigned int cl_recover_callbacks;
+       char cl_cluster_name[DLM_LOCKSPACE_LEN];
 };
 
 enum {
@@ -118,6 +122,8 @@ enum {
        CLUSTER_ATTR_TIMEWARN_CS,
        CLUSTER_ATTR_WAITWARN_US,
        CLUSTER_ATTR_NEW_RSB_COUNT,
+       CLUSTER_ATTR_RECOVER_CALLBACKS,
+       CLUSTER_ATTR_CLUSTER_NAME,
 };
 
 struct cluster_attribute {
@@ -126,6 +132,27 @@ struct cluster_attribute {
        ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
 };
 
+static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
+{
+       return sprintf(buf, "%s\n", cl->cl_cluster_name);
+}
+
+static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
+                                         const char *buf, size_t len)
+{
+       strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN);
+       strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN);
+       return len;
+}
+
+static struct cluster_attribute cluster_attr_cluster_name = {
+       .attr   = { .ca_owner = THIS_MODULE,
+                    .ca_name = "cluster_name",
+                    .ca_mode = S_IRUGO | S_IWUSR },
+       .show   = cluster_cluster_name_read,
+       .store  = cluster_cluster_name_write,
+};
+
 static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
                           int *info_field, int check_zero,
                           const char *buf, size_t len)
@@ -171,6 +198,7 @@ CLUSTER_ATTR(protocol, 0);
 CLUSTER_ATTR(timewarn_cs, 1);
 CLUSTER_ATTR(waitwarn_us, 0);
 CLUSTER_ATTR(new_rsb_count, 0);
+CLUSTER_ATTR(recover_callbacks, 0);
 
 static struct configfs_attribute *cluster_attrs[] = {
        [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
@@ -185,6 +213,8 @@ static struct configfs_attribute *cluster_attrs[] = {
        [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
        [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
        [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
+       [CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr,
+       [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr,
        NULL,
 };
 
@@ -293,6 +323,7 @@ struct dlm_comms {
 
 struct dlm_comm {
        struct config_item item;
+       int seq;
        int nodeid;
        int local;
        int addr_count;
@@ -309,6 +340,7 @@ struct dlm_node {
        int nodeid;
        int weight;
        int new;
+       int comm_seq; /* copy of cm->seq when nd->nodeid is set */
 };
 
 static struct configfs_group_operations clusters_ops = {
@@ -455,6 +487,9 @@ static struct config_group *make_cluster(struct config_group *g,
        cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
        cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
        cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
+       cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
+       memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
+              DLM_LOCKSPACE_LEN);
 
        space_list = &sps->ss_group;
        comm_list = &cms->cs_group;
@@ -558,6 +593,11 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
                return ERR_PTR(-ENOMEM);
 
        config_item_init_type_name(&cm->item, name, &comm_type);
+
+       cm->seq = dlm_comm_count++;
+       if (!cm->seq)
+               cm->seq = dlm_comm_count++;
+
        cm->nodeid = -1;
        cm->local = 0;
        cm->addr_count = 0;
@@ -801,7 +841,10 @@ static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
 static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
                                 size_t len)
 {
+       uint32_t seq = 0;
        nd->nodeid = simple_strtol(buf, NULL, 0);
+       dlm_comm_seq(nd->nodeid, &seq);
+       nd->comm_seq = seq;
        return len;
 }
 
@@ -908,13 +951,13 @@ static void put_comm(struct dlm_comm *cm)
 }
 
 /* caller must free mem */
-int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
-                   int **new_out, int *new_count_out)
+int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+                    int *count_out)
 {
        struct dlm_space *sp;
        struct dlm_node *nd;
-       int i = 0, rv = 0, ids_count = 0, new_count = 0;
-       int *ids, *new;
+       struct dlm_config_node *nodes, *node;
+       int rv, count;
 
        sp = get_space(lsname);
        if (!sp)
@@ -927,73 +970,42 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
                goto out;
        }
 
-       ids_count = sp->members_count;
+       count = sp->members_count;
 
-       ids = kcalloc(ids_count, sizeof(int), GFP_NOFS);
-       if (!ids) {
+       nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
+       if (!nodes) {
                rv = -ENOMEM;
                goto out;
        }
 
+       node = nodes;
        list_for_each_entry(nd, &sp->members, list) {
-               ids[i++] = nd->nodeid;
-               if (nd->new)
-                       new_count++;
-       }
-
-       if (ids_count != i)
-               printk(KERN_ERR "dlm: bad nodeid count %d %d\n", ids_count, i);
-
-       if (!new_count)
-               goto out_ids;
+               node->nodeid = nd->nodeid;
+               node->weight = nd->weight;
+               node->new = nd->new;
+               node->comm_seq = nd->comm_seq;
+               node++;
 
-       new = kcalloc(new_count, sizeof(int), GFP_NOFS);
-       if (!new) {
-               kfree(ids);
-               rv = -ENOMEM;
-               goto out;
+               nd->new = 0;
        }
 
-       i = 0;
-       list_for_each_entry(nd, &sp->members, list) {
-               if (nd->new) {
-                       new[i++] = nd->nodeid;
-                       nd->new = 0;
-               }
-       }
-       *new_count_out = new_count;
-       *new_out = new;
-
- out_ids:
-       *ids_count_out = ids_count;
-       *ids_out = ids;
+       *count_out = count;
+       *nodes_out = nodes;
+       rv = 0;
  out:
        mutex_unlock(&sp->members_lock);
        put_space(sp);
        return rv;
 }
 
-int dlm_node_weight(char *lsname, int nodeid)
+int dlm_comm_seq(int nodeid, uint32_t *seq)
 {
-       struct dlm_space *sp;
-       struct dlm_node *nd;
-       int w = -EEXIST;
-
-       sp = get_space(lsname);
-       if (!sp)
-               goto out;
-
-       mutex_lock(&sp->members_lock);
-       list_for_each_entry(nd, &sp->members, list) {
-               if (nd->nodeid != nodeid)
-                       continue;
-               w = nd->weight;
-               break;
-       }
-       mutex_unlock(&sp->members_lock);
-       put_space(sp);
- out:
-       return w;
+       struct dlm_comm *cm = get_comm(nodeid, NULL);
+       if (!cm)
+               return -EEXIST;
+       *seq = cm->seq;
+       put_comm(cm);
+       return 0;
 }
 
 int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
@@ -1047,6 +1059,8 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
 #define DEFAULT_TIMEWARN_CS      500 /* 5 sec = 500 centiseconds */
 #define DEFAULT_WAITWARN_US       0
 #define DEFAULT_NEW_RSB_COUNT    128
+#define DEFAULT_RECOVER_CALLBACKS  0
+#define DEFAULT_CLUSTER_NAME      ""
 
 struct dlm_config_info dlm_config = {
        .ci_tcp_port = DEFAULT_TCP_PORT,
@@ -1060,6 +1074,8 @@ struct dlm_config_info dlm_config = {
        .ci_protocol = DEFAULT_PROTOCOL,
        .ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
        .ci_waitwarn_us = DEFAULT_WAITWARN_US,
-       .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT
+       .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
+       .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
+       .ci_cluster_name = DEFAULT_CLUSTER_NAME
 };
 
index 3099d0dd26c0a00265972bafb7408709c9588cbc..9f5e3663bb0c9e9509f4614f54cad427b4a02485 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
 #ifndef __CONFIG_DOT_H__
 #define __CONFIG_DOT_H__
 
+struct dlm_config_node {
+       int nodeid;
+       int weight;
+       int new;
+       uint32_t comm_seq;
+};
+
 #define DLM_MAX_ADDR_COUNT 3
 
 struct dlm_config_info {
@@ -29,15 +36,17 @@ struct dlm_config_info {
        int ci_timewarn_cs;
        int ci_waitwarn_us;
        int ci_new_rsb_count;
+       int ci_recover_callbacks;
+       char ci_cluster_name[DLM_LOCKSPACE_LEN];
 };
 
 extern struct dlm_config_info dlm_config;
 
 int dlm_config_init(void);
 void dlm_config_exit(void);
-int dlm_node_weight(char *lsname, int nodeid);
-int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
-                   int **new_out, int *new_count_out);
+int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+                    int *count_out);
+int dlm_comm_seq(int nodeid, uint32_t *seq);
 int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
 int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
 int dlm_our_nodeid(void);
index 59779237e2b40d4af09fa17ad35dab3138feee52..3dca2b39e83fa9c8b5025f0e3aefa94252ed0a41 100644 (file)
@@ -393,6 +393,7 @@ static const struct seq_operations format3_seq_ops;
 
 static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 {
+       struct rb_node *node;
        struct dlm_ls *ls = seq->private;
        struct rsbtbl_iter *ri;
        struct dlm_rsb *r;
@@ -418,9 +419,10 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
                ri->format = 3;
 
        spin_lock(&ls->ls_rsbtbl[bucket].lock);
-       if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-               list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
-                                   res_hashchain) {
+       if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+               for (node = rb_first(&ls->ls_rsbtbl[bucket].keep); node;
+                    node = rb_next(node)) {
+                       r = rb_entry(node, struct dlm_rsb, res_hashnode);
                        if (!entry--) {
                                dlm_hold_rsb(r);
                                ri->rsb = r;
@@ -449,9 +451,9 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
                }
 
                spin_lock(&ls->ls_rsbtbl[bucket].lock);
-               if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-                       r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
-                                            struct dlm_rsb, res_hashchain);
+               if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+                       node = rb_first(&ls->ls_rsbtbl[bucket].keep);
+                       r = rb_entry(node, struct dlm_rsb, res_hashnode);
                        dlm_hold_rsb(r);
                        ri->rsb = r;
                        ri->bucket = bucket;
@@ -467,7 +469,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 {
        struct dlm_ls *ls = seq->private;
        struct rsbtbl_iter *ri = iter_ptr;
-       struct list_head *next;
+       struct rb_node *next;
        struct dlm_rsb *r, *rp;
        loff_t n = *pos;
        unsigned bucket;
@@ -480,10 +482,10 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 
        spin_lock(&ls->ls_rsbtbl[bucket].lock);
        rp = ri->rsb;
-       next = rp->res_hashchain.next;
+       next = rb_next(&rp->res_hashnode);
 
-       if (next != &ls->ls_rsbtbl[bucket].list) {
-               r = list_entry(next, struct dlm_rsb, res_hashchain);
+       if (next) {
+               r = rb_entry(next, struct dlm_rsb, res_hashnode);
                dlm_hold_rsb(r);
                ri->rsb = r;
                spin_unlock(&ls->ls_rsbtbl[bucket].lock);
@@ -511,9 +513,9 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
                }
 
                spin_lock(&ls->ls_rsbtbl[bucket].lock);
-               if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-                       r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
-                                            struct dlm_rsb, res_hashchain);
+               if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+                       next = rb_first(&ls->ls_rsbtbl[bucket].keep);
+                       r = rb_entry(next, struct dlm_rsb, res_hashnode);
                        dlm_hold_rsb(r);
                        ri->rsb = r;
                        ri->bucket = bucket;
index 7b84c1dbc82ebeaec0a42846537524c73d1db13f..83641574b0168f6753e340cac3962fa390ae621d 100644 (file)
@@ -290,7 +290,6 @@ int dlm_recover_directory(struct dlm_ls *ls)
 
  out_status:
        error = 0;
-       dlm_set_recover_status(ls, DLM_RS_DIR);
        log_debug(ls, "dlm_recover_directory %d entries", count);
  out_free:
        kfree(last_name);
index fe2860c024495d4ca270f75a453caf1c8148bf58..3a564d197e99f2e822f16a6730425575c661248a 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -103,8 +103,8 @@ struct dlm_dirtable {
 };
 
 struct dlm_rsbtable {
-       struct list_head        list;
-       struct list_head        toss;
+       struct rb_root          keep;
+       struct rb_root          toss;
        spinlock_t              lock;
 };
 
@@ -117,6 +117,10 @@ struct dlm_member {
        struct list_head        list;
        int                     nodeid;
        int                     weight;
+       int                     slot;
+       int                     slot_prev;
+       int                     comm_seq;
+       uint32_t                generation;
 };
 
 /*
@@ -125,10 +129,8 @@ struct dlm_member {
 
 struct dlm_recover {
        struct list_head        list;
-       int                     *nodeids;   /* nodeids of all members */
-       int                     node_count;
-       int                     *new;       /* nodeids of new members */
-       int                     new_count;
+       struct dlm_config_node  *nodes;
+       int                     nodes_count;
        uint64_t                seq;
 };
 
@@ -285,7 +287,10 @@ struct dlm_rsb {
        unsigned long           res_toss_time;
        uint32_t                res_first_lkid;
        struct list_head        res_lookup;     /* lkbs waiting on first */
-       struct list_head        res_hashchain;  /* rsbtbl */
+       union {
+               struct list_head        res_hashchain;
+               struct rb_node          res_hashnode;   /* rsbtbl */
+       };
        struct list_head        res_grantqueue;
        struct list_head        res_convertqueue;
        struct list_head        res_waitqueue;
@@ -334,7 +339,9 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
 /* dlm_header is first element of all structs sent between nodes */
 
 #define DLM_HEADER_MAJOR       0x00030000
-#define DLM_HEADER_MINOR       0x00000000
+#define DLM_HEADER_MINOR       0x00000001
+
+#define DLM_HEADER_SLOTS       0x00000001
 
 #define DLM_MSG                        1
 #define DLM_RCOM               2
@@ -422,10 +429,34 @@ union dlm_packet {
        struct dlm_rcom         rcom;
 };
 
+#define DLM_RSF_NEED_SLOTS     0x00000001
+
+/* RCOM_STATUS data */
+struct rcom_status {
+       __le32                  rs_flags;
+       __le32                  rs_unused1;
+       __le64                  rs_unused2;
+};
+
+/* RCOM_STATUS_REPLY data */
 struct rcom_config {
        __le32                  rf_lvblen;
        __le32                  rf_lsflags;
-       __le64                  rf_unused;
+
+       /* DLM_HEADER_SLOTS adds: */
+       __le32                  rf_flags;
+       __le16                  rf_our_slot;
+       __le16                  rf_num_slots;
+       __le32                  rf_generation;
+       __le32                  rf_unused1;
+       __le64                  rf_unused2;
+};
+
+struct rcom_slot {
+       __le32                  ro_nodeid;
+       __le16                  ro_slot;
+       __le16                  ro_unused1;
+       __le64                  ro_unused2;
 };
 
 struct rcom_lock {
@@ -452,6 +483,7 @@ struct dlm_ls {
        struct list_head        ls_list;        /* list of lockspaces */
        dlm_lockspace_t         *ls_local_handle;
        uint32_t                ls_global_id;   /* global unique lockspace ID */
+       uint32_t                ls_generation;
        uint32_t                ls_exflags;
        int                     ls_lvblen;
        int                     ls_count;       /* refcount of processes in
@@ -490,6 +522,11 @@ struct dlm_ls {
        int                     ls_total_weight;
        int                     *ls_node_array;
 
+       int                     ls_slot;
+       int                     ls_num_slots;
+       int                     ls_slots_size;
+       struct dlm_slot         *ls_slots;
+
        struct dlm_rsb          ls_stub_rsb;    /* for returning errors */
        struct dlm_lkb          ls_stub_lkb;    /* for returning errors */
        struct dlm_message      ls_stub_ms;     /* for faking a reply */
@@ -537,6 +574,9 @@ struct dlm_ls {
        struct list_head        ls_root_list;   /* root resources */
        struct rw_semaphore     ls_root_sem;    /* protect root_list */
 
+       const struct dlm_lockspace_ops *ls_ops;
+       void                    *ls_ops_arg;
+
        int                     ls_namelen;
        char                    ls_name[1];
 };
index 83b5e32514e17b32c0bc809413be60694ae019e5..d47183043c5942ba19100d9b0eaf67147aaddb31 100644 (file)
@@ -56,6 +56,7 @@
    L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
 */
 #include <linux/types.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include "dlm_internal.h"
 #include <linux/dlm_device.h>
@@ -380,6 +381,8 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
 
        r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
        list_del(&r->res_hashchain);
+       /* Convert the empty list_head to a NULL rb_node for tree usage: */
+       memset(&r->res_hashnode, 0, sizeof(struct rb_node));
        ls->ls_new_rsb_count--;
        spin_unlock(&ls->ls_new_rsb_spin);
 
@@ -388,7 +391,6 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
        memcpy(r->res_name, name, len);
        mutex_init(&r->res_mutex);
 
-       INIT_LIST_HEAD(&r->res_hashchain);
        INIT_LIST_HEAD(&r->res_lookup);
        INIT_LIST_HEAD(&r->res_grantqueue);
        INIT_LIST_HEAD(&r->res_convertqueue);
@@ -400,14 +402,31 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
        return 0;
 }
 
-static int search_rsb_list(struct list_head *head, char *name, int len,
+static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
+{
+       char maxname[DLM_RESNAME_MAXLEN];
+
+       memset(maxname, 0, DLM_RESNAME_MAXLEN);
+       memcpy(maxname, name, nlen);
+       return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
+}
+
+static int search_rsb_tree(struct rb_root *tree, char *name, int len,
                           unsigned int flags, struct dlm_rsb **r_ret)
 {
+       struct rb_node *node = tree->rb_node;
        struct dlm_rsb *r;
        int error = 0;
-
-       list_for_each_entry(r, head, res_hashchain) {
-               if (len == r->res_length && !memcmp(name, r->res_name, len))
+       int rc;
+
+       while (node) {
+               r = rb_entry(node, struct dlm_rsb, res_hashnode);
+               rc = rsb_cmp(r, name, len);
+               if (rc < 0)
+                       node = node->rb_left;
+               else if (rc > 0)
+                       node = node->rb_right;
+               else
                        goto found;
        }
        *r_ret = NULL;
@@ -420,22 +439,54 @@ static int search_rsb_list(struct list_head *head, char *name, int len,
        return error;
 }
 
+static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
+{
+       struct rb_node **newn = &tree->rb_node;
+       struct rb_node *parent = NULL;
+       int rc;
+
+       while (*newn) {
+               struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
+                                              res_hashnode);
+
+               parent = *newn;
+               rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
+               if (rc < 0)
+                       newn = &parent->rb_left;
+               else if (rc > 0)
+                       newn = &parent->rb_right;
+               else {
+                       log_print("rsb_insert match");
+                       dlm_dump_rsb(rsb);
+                       dlm_dump_rsb(cur);
+                       return -EEXIST;
+               }
+       }
+
+       rb_link_node(&rsb->res_hashnode, parent, newn);
+       rb_insert_color(&rsb->res_hashnode, tree);
+       return 0;
+}
+
 static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
                       unsigned int flags, struct dlm_rsb **r_ret)
 {
        struct dlm_rsb *r;
        int error;
 
-       error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
+       error = search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, flags, &r);
        if (!error) {
                kref_get(&r->res_ref);
                goto out;
        }
-       error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
+       error = search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
        if (error)
                goto out;
 
-       list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
+       rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+       error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+       if (error)
+               return error;
 
        if (dlm_no_directory(ls))
                goto out;
@@ -527,8 +578,7 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
                        nodeid = 0;
                r->res_nodeid = nodeid;
        }
-       list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
-       error = 0;
+       error = rsb_insert(r, &ls->ls_rsbtbl[bucket].keep);
  out_unlock:
        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
  out:
@@ -556,7 +606,8 @@ static void toss_rsb(struct kref *kref)
 
        DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
        kref_init(&r->res_ref);
-       list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
+       rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
+       rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
        r->res_toss_time = jiffies;
        if (r->res_lvbptr) {
                dlm_free_lvb(r->res_lvbptr);
@@ -1082,19 +1133,19 @@ static void dir_remove(struct dlm_rsb *r)
                                     r->res_name, r->res_length);
 }
 
-/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
-   found since they are in order of newest to oldest? */
+/* FIXME: make this more efficient */
 
 static int shrink_bucket(struct dlm_ls *ls, int b)
 {
+       struct rb_node *n;
        struct dlm_rsb *r;
        int count = 0, found;
 
        for (;;) {
                found = 0;
                spin_lock(&ls->ls_rsbtbl[b].lock);
-               list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
-                                           res_hashchain) {
+               for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = rb_next(n)) {
+                       r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        if (!time_after_eq(jiffies, r->res_toss_time +
                                           dlm_config.ci_toss_secs * HZ))
                                continue;
@@ -1108,7 +1159,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
                }
 
                if (kref_put(&r->res_ref, kill_rsb)) {
-                       list_del(&r->res_hashchain);
+                       rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
 
                        if (is_master(r))
@@ -4441,10 +4492,12 @@ int dlm_purge_locks(struct dlm_ls *ls)
 
 static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 {
+       struct rb_node *n;
        struct dlm_rsb *r, *r_ret = NULL;
 
        spin_lock(&ls->ls_rsbtbl[bucket].lock);
-       list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
+       for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
+               r = rb_entry(n, struct dlm_rsb, res_hashnode);
                if (!rsb_flag(r, RSB_LOCKS_PURGED))
                        continue;
                hold_rsb(r);
index a1d8f1af144b92af587d9772d1276b625db508b5..a1ea25face828239a3434d6e036f7acffc9cf092 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -386,12 +386,15 @@ static void threads_stop(void)
        dlm_lowcomms_stop();
 }
 
-static int new_lockspace(const char *name, int namelen, void **lockspace,
-                        uint32_t flags, int lvblen)
+static int new_lockspace(const char *name, const char *cluster,
+                        uint32_t flags, int lvblen,
+                        const struct dlm_lockspace_ops *ops, void *ops_arg,
+                        int *ops_result, dlm_lockspace_t **lockspace)
 {
        struct dlm_ls *ls;
        int i, size, error;
        int do_unreg = 0;
+       int namelen = strlen(name);
 
        if (namelen > DLM_LOCKSPACE_LEN)
                return -EINVAL;
@@ -403,8 +406,24 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
                return -EINVAL;
 
        if (!dlm_user_daemon_available()) {
-               module_put(THIS_MODULE);
-               return -EUNATCH;
+               log_print("dlm user daemon not available");
+               error = -EUNATCH;
+               goto out;
+       }
+
+       if (ops && ops_result) {
+               if (!dlm_config.ci_recover_callbacks)
+                       *ops_result = -EOPNOTSUPP;
+               else
+                       *ops_result = 0;
+       }
+
+       if (dlm_config.ci_recover_callbacks && cluster &&
+           strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
+               log_print("dlm cluster name %s mismatch %s",
+                         dlm_config.ci_cluster_name, cluster);
+               error = -EBADR;
+               goto out;
        }
 
        error = 0;
@@ -442,6 +461,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        ls->ls_flags = 0;
        ls->ls_scan_time = jiffies;
 
+       if (ops && dlm_config.ci_recover_callbacks) {
+               ls->ls_ops = ops;
+               ls->ls_ops_arg = ops_arg;
+       }
+
        if (flags & DLM_LSFL_TIMEWARN)
                set_bit(LSFL_TIMEWARN, &ls->ls_flags);
 
@@ -457,8 +481,8 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        if (!ls->ls_rsbtbl)
                goto out_lsfree;
        for (i = 0; i < size; i++) {
-               INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
-               INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
+               ls->ls_rsbtbl[i].keep.rb_node = NULL;
+               ls->ls_rsbtbl[i].toss.rb_node = NULL;
                spin_lock_init(&ls->ls_rsbtbl[i].lock);
        }
 
@@ -525,6 +549,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        if (!ls->ls_recover_buf)
                goto out_dirfree;
 
+       ls->ls_slot = 0;
+       ls->ls_num_slots = 0;
+       ls->ls_slots_size = 0;
+       ls->ls_slots = NULL;
+
        INIT_LIST_HEAD(&ls->ls_recover_list);
        spin_lock_init(&ls->ls_recover_list_lock);
        ls->ls_recover_list_count = 0;
@@ -614,8 +643,10 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        return error;
 }
 
-int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
-                     uint32_t flags, int lvblen)
+int dlm_new_lockspace(const char *name, const char *cluster,
+                     uint32_t flags, int lvblen,
+                     const struct dlm_lockspace_ops *ops, void *ops_arg,
+                     int *ops_result, dlm_lockspace_t **lockspace)
 {
        int error = 0;
 
@@ -625,7 +656,8 @@ int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
        if (error)
                goto out;
 
-       error = new_lockspace(name, namelen, lockspace, flags, lvblen);
+       error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg,
+                             ops_result, lockspace);
        if (!error)
                ls_count++;
        if (error > 0)
@@ -685,7 +717,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
 static int release_lockspace(struct dlm_ls *ls, int force)
 {
        struct dlm_rsb *rsb;
-       struct list_head *head;
+       struct rb_node *n;
        int i, busy, rv;
 
        busy = lockspace_busy(ls, force);
@@ -746,20 +778,15 @@ static int release_lockspace(struct dlm_ls *ls, int force)
         */
 
        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-               head = &ls->ls_rsbtbl[i].list;
-               while (!list_empty(head)) {
-                       rsb = list_entry(head->next, struct dlm_rsb,
-                                        res_hashchain);
-
-                       list_del(&rsb->res_hashchain);
+               while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
+                       rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+                       rb_erase(n, &ls->ls_rsbtbl[i].keep);
                        dlm_free_rsb(rsb);
                }
 
-               head = &ls->ls_rsbtbl[i].toss;
-               while (!list_empty(head)) {
-                       rsb = list_entry(head->next, struct dlm_rsb,
-                                        res_hashchain);
-                       list_del(&rsb->res_hashchain);
+               while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
+                       rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+                       rb_erase(n, &ls->ls_rsbtbl[i].toss);
                        dlm_free_rsb(rsb);
                }
        }
index b12532e553f8e7947bd4c83435f22fff4724366b..862640a36d5cbba1762ad47317dffbd72eab0531 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 *******************************************************************************
 **
-**  Copyright (C) 2005-2009 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2005-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
 #include "config.h"
 #include "lowcomms.h"
 
+int dlm_slots_version(struct dlm_header *h)
+{
+       if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
+               return 0;
+       return 1;
+}
+
+void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
+                  struct dlm_member *memb)
+{
+       struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
+
+       if (!dlm_slots_version(&rc->rc_header))
+               return;
+
+       memb->slot = le16_to_cpu(rf->rf_our_slot);
+       memb->generation = le32_to_cpu(rf->rf_generation);
+}
+
+void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc)
+{
+       struct dlm_slot *slot;
+       struct rcom_slot *ro;
+       int i;
+
+       ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
+
+       /* ls_slots array is sparse, but not rcom_slots */
+
+       for (i = 0; i < ls->ls_slots_size; i++) {
+               slot = &ls->ls_slots[i];
+               if (!slot->nodeid)
+                       continue;
+               ro->ro_nodeid = cpu_to_le32(slot->nodeid);
+               ro->ro_slot = cpu_to_le16(slot->slot);
+               ro++;
+       }
+}
+
+#define SLOT_DEBUG_LINE 128
+
+static void log_debug_slots(struct dlm_ls *ls, uint32_t gen, int num_slots,
+                           struct rcom_slot *ro0, struct dlm_slot *array,
+                           int array_size)
+{
+       char line[SLOT_DEBUG_LINE];
+       int len = SLOT_DEBUG_LINE - 1;
+       int pos = 0;
+       int ret, i;
+
+       if (!dlm_config.ci_log_debug)
+               return;
+
+       memset(line, 0, sizeof(line));
+
+       if (array) {
+               for (i = 0; i < array_size; i++) {
+                       if (!array[i].nodeid)
+                               continue;
+
+                       ret = snprintf(line + pos, len - pos, " %d:%d",
+                                      array[i].slot, array[i].nodeid);
+                       if (ret >= len - pos)
+                               break;
+                       pos += ret;
+               }
+       } else if (ro0) {
+               for (i = 0; i < num_slots; i++) {
+                       ret = snprintf(line + pos, len - pos, " %d:%d",
+                                      ro0[i].ro_slot, ro0[i].ro_nodeid);
+                       if (ret >= len - pos)
+                               break;
+                       pos += ret;
+               }
+       }
+
+       log_debug(ls, "generation %u slots %d%s", gen, num_slots, line);
+}
+
+int dlm_slots_copy_in(struct dlm_ls *ls)
+{
+       struct dlm_member *memb;
+       struct dlm_rcom *rc = ls->ls_recover_buf;
+       struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
+       struct rcom_slot *ro0, *ro;
+       int our_nodeid = dlm_our_nodeid();
+       int i, num_slots;
+       uint32_t gen;
+
+       if (!dlm_slots_version(&rc->rc_header))
+               return -1;
+
+       gen = le32_to_cpu(rf->rf_generation);
+       if (gen <= ls->ls_generation) {
+               log_error(ls, "dlm_slots_copy_in gen %u old %u",
+                         gen, ls->ls_generation);
+       }
+       ls->ls_generation = gen;
+
+       num_slots = le16_to_cpu(rf->rf_num_slots);
+       if (!num_slots)
+               return -1;
+
+       ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
+
+       for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+               ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
+               ro->ro_slot = le16_to_cpu(ro->ro_slot);
+       }
+
+       log_debug_slots(ls, gen, num_slots, ro0, NULL, 0);
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+                       if (ro->ro_nodeid != memb->nodeid)
+                               continue;
+                       memb->slot = ro->ro_slot;
+                       memb->slot_prev = memb->slot;
+                       break;
+               }
+
+               if (memb->nodeid == our_nodeid) {
+                       if (ls->ls_slot && ls->ls_slot != memb->slot) {
+                               log_error(ls, "dlm_slots_copy_in our slot "
+                                         "changed %d %d", ls->ls_slot,
+                                         memb->slot);
+                               return -1;
+                       }
+
+                       if (!ls->ls_slot)
+                               ls->ls_slot = memb->slot;
+               }
+
+               if (!memb->slot) {
+                       log_error(ls, "dlm_slots_copy_in nodeid %d no slot",
+                                  memb->nodeid);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+/* for any nodes that do not support slots, we will not have set memb->slot
+   in wait_status_all(), so memb->slot will remain -1, and we will not
+   assign slots or set ls_num_slots here */
+
+int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
+                    struct dlm_slot **slots_out, uint32_t *gen_out)
+{
+       struct dlm_member *memb;
+       struct dlm_slot *array;
+       int our_nodeid = dlm_our_nodeid();
+       int array_size, max_slots, i;
+       int need = 0;
+       int max = 0;
+       int num = 0;
+       uint32_t gen = 0;
+
+       /* our own memb struct will have slot -1 gen 0 */
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (memb->nodeid == our_nodeid) {
+                       memb->slot = ls->ls_slot;
+                       memb->generation = ls->ls_generation;
+                       break;
+               }
+       }
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (memb->generation > gen)
+                       gen = memb->generation;
+
+               /* node doesn't support slots */
+
+               if (memb->slot == -1)
+                       return -1;
+
+               /* node needs a slot assigned */
+
+               if (!memb->slot)
+                       need++;
+
+               /* node has a slot assigned */
+
+               num++;
+
+               if (!max || max < memb->slot)
+                       max = memb->slot;
+
+               /* sanity check, once slot is assigned it shouldn't change */
+
+               if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) {
+                       log_error(ls, "nodeid %d slot changed %d %d",
+                                 memb->nodeid, memb->slot_prev, memb->slot);
+                       return -1;
+               }
+               memb->slot_prev = memb->slot;
+       }
+
+       array_size = max + need;
+
+       array = kzalloc(array_size * sizeof(struct dlm_slot), GFP_NOFS);
+       if (!array)
+               return -ENOMEM;
+
+       num = 0;
+
+       /* fill in slots (offsets) that are used */
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (!memb->slot)
+                       continue;
+
+               if (memb->slot > array_size) {
+                       log_error(ls, "invalid slot number %d", memb->slot);
+                       kfree(array);
+                       return -1;
+               }
+
+               array[memb->slot - 1].nodeid = memb->nodeid;
+               array[memb->slot - 1].slot = memb->slot;
+               num++;
+       }
+
+       /* assign new slots from unused offsets */
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (memb->slot)
+                       continue;
+
+               for (i = 0; i < array_size; i++) {
+                       if (array[i].nodeid)
+                               continue;
+
+                       memb->slot = i + 1;
+                       memb->slot_prev = memb->slot;
+                       array[i].nodeid = memb->nodeid;
+                       array[i].slot = memb->slot;
+                       num++;
+
+                       if (!ls->ls_slot && memb->nodeid == our_nodeid)
+                               ls->ls_slot = memb->slot;
+                       break;
+               }
+
+               if (!memb->slot) {
+                       log_error(ls, "no free slot found");
+                       kfree(array);
+                       return -1;
+               }
+       }
+
+       gen++;
+
+       log_debug_slots(ls, gen, num, NULL, array, array_size);
+
+       max_slots = (dlm_config.ci_buffer_size - sizeof(struct dlm_rcom) -
+                    sizeof(struct rcom_config)) / sizeof(struct rcom_slot);
+
+       if (num > max_slots) {
+               log_error(ls, "num_slots %d exceeds max_slots %d",
+                         num, max_slots);
+               kfree(array);
+               return -1;
+       }
+
+       *gen_out = gen;
+       *slots_out = array;
+       *slots_size = array_size;
+       *num_slots = num;
+       return 0;
+}
+
 static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
 {
        struct dlm_member *memb = NULL;
@@ -43,59 +317,51 @@ static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
        }
 }
 
-static int dlm_add_member(struct dlm_ls *ls, int nodeid)
+static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
 {
        struct dlm_member *memb;
-       int w, error;
+       int error;
 
        memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
        if (!memb)
                return -ENOMEM;
 
-       w = dlm_node_weight(ls->ls_name, nodeid);
-       if (w < 0) {
-               kfree(memb);
-               return w;
-       }
-
-       error = dlm_lowcomms_connect_node(nodeid);
+       error = dlm_lowcomms_connect_node(node->nodeid);
        if (error < 0) {
                kfree(memb);
                return error;
        }
 
-       memb->nodeid = nodeid;
-       memb->weight = w;
+       memb->nodeid = node->nodeid;
+       memb->weight = node->weight;
+       memb->comm_seq = node->comm_seq;
        add_ordered_member(ls, memb);
        ls->ls_num_nodes++;
        return 0;
 }
 
-static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
-{
-       list_move(&memb->list, &ls->ls_nodes_gone);
-       ls->ls_num_nodes--;
-}
-
-int dlm_is_member(struct dlm_ls *ls, int nodeid)
+static struct dlm_member *find_memb(struct list_head *head, int nodeid)
 {
        struct dlm_member *memb;
 
-       list_for_each_entry(memb, &ls->ls_nodes, list) {
+       list_for_each_entry(memb, head, list) {
                if (memb->nodeid == nodeid)
-                       return 1;
+                       return memb;
        }
+       return NULL;
+}
+
+int dlm_is_member(struct dlm_ls *ls, int nodeid)
+{
+       if (find_memb(&ls->ls_nodes, nodeid))
+               return 1;
        return 0;
 }
 
 int dlm_is_removed(struct dlm_ls *ls, int nodeid)
 {
-       struct dlm_member *memb;
-
-       list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
-               if (memb->nodeid == nodeid)
-                       return 1;
-       }
+       if (find_memb(&ls->ls_nodes_gone, nodeid))
+               return 1;
        return 0;
 }
 
@@ -176,7 +442,7 @@ static int ping_members(struct dlm_ls *ls)
                error = dlm_recovery_stopped(ls);
                if (error)
                        break;
-               error = dlm_rcom_status(ls, memb->nodeid);
+               error = dlm_rcom_status(ls, memb->nodeid, 0);
                if (error)
                        break;
        }
@@ -186,10 +452,88 @@ static int ping_members(struct dlm_ls *ls)
        return error;
 }
 
+static void dlm_lsop_recover_prep(struct dlm_ls *ls)
+{
+       if (!ls->ls_ops || !ls->ls_ops->recover_prep)
+               return;
+       ls->ls_ops->recover_prep(ls->ls_ops_arg);
+}
+
+static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
+{
+       struct dlm_slot slot;
+       uint32_t seq;
+       int error;
+
+       if (!ls->ls_ops || !ls->ls_ops->recover_slot)
+               return;
+
+       /* if there is no comms connection with this node
+          or the present comms connection is newer
+          than the one when this member was added, then
+          we consider the node to have failed (versus
+          being removed due to dlm_release_lockspace) */
+
+       error = dlm_comm_seq(memb->nodeid, &seq);
+
+       if (!error && seq == memb->comm_seq)
+               return;
+
+       slot.nodeid = memb->nodeid;
+       slot.slot = memb->slot;
+
+       ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot);
+}
+
+void dlm_lsop_recover_done(struct dlm_ls *ls)
+{
+       struct dlm_member *memb;
+       struct dlm_slot *slots;
+       int i, num;
+
+       if (!ls->ls_ops || !ls->ls_ops->recover_done)
+               return;
+
+       num = ls->ls_num_nodes;
+
+       slots = kzalloc(num * sizeof(struct dlm_slot), GFP_KERNEL);
+       if (!slots)
+               return;
+
+       i = 0;
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               if (i == num) {
+                       log_error(ls, "dlm_lsop_recover_done bad num %d", num);
+                       goto out;
+               }
+               slots[i].nodeid = memb->nodeid;
+               slots[i].slot = memb->slot;
+               i++;
+       }
+
+       ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num,
+                                ls->ls_slot, ls->ls_generation);
+ out:
+       kfree(slots);
+}
+
+static struct dlm_config_node *find_config_node(struct dlm_recover *rv,
+                                               int nodeid)
+{
+       int i;
+
+       for (i = 0; i < rv->nodes_count; i++) {
+               if (rv->nodes[i].nodeid == nodeid)
+                       return &rv->nodes[i];
+       }
+       return NULL;
+}
+
 int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
 {
        struct dlm_member *memb, *safe;
-       int i, error, found, pos = 0, neg = 0, low = -1;
+       struct dlm_config_node *node;
+       int i, error, neg = 0, low = -1;
 
        /* previously removed members that we've not finished removing need to
           count as a negative change so the "neg" recovery steps will happen */
@@ -202,46 +546,32 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        /* move departed members from ls_nodes to ls_nodes_gone */
 
        list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
-               found = 0;
-               for (i = 0; i < rv->node_count; i++) {
-                       if (memb->nodeid == rv->nodeids[i]) {
-                               found = 1;
-                               break;
-                       }
-               }
+               node = find_config_node(rv, memb->nodeid);
+               if (node && !node->new)
+                       continue;
 
-               if (!found) {
-                       neg++;
-                       dlm_remove_member(ls, memb);
+               if (!node) {
                        log_debug(ls, "remove member %d", memb->nodeid);
+               } else {
+                       /* removed and re-added */
+                       log_debug(ls, "remove member %d comm_seq %u %u",
+                                 memb->nodeid, memb->comm_seq, node->comm_seq);
                }
-       }
-
-       /* Add an entry to ls_nodes_gone for members that were removed and
-          then added again, so that previous state for these nodes will be
-          cleared during recovery. */
-
-       for (i = 0; i < rv->new_count; i++) {
-               if (!dlm_is_member(ls, rv->new[i]))
-                       continue;
-               log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
 
-               memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
-               if (!memb)
-                       return -ENOMEM;
-               memb->nodeid = rv->new[i];
-               list_add_tail(&memb->list, &ls->ls_nodes_gone);
                neg++;
+               list_move(&memb->list, &ls->ls_nodes_gone);
+               ls->ls_num_nodes--;
+               dlm_lsop_recover_slot(ls, memb);
        }
 
        /* add new members to ls_nodes */
 
-       for (i = 0; i < rv->node_count; i++) {
-               if (dlm_is_member(ls, rv->nodeids[i]))
+       for (i = 0; i < rv->nodes_count; i++) {
+               node = &rv->nodes[i];
+               if (dlm_is_member(ls, node->nodeid))
                        continue;
-               dlm_add_member(ls, rv->nodeids[i]);
-               pos++;
-               log_debug(ls, "add member %d", rv->nodeids[i]);
+               dlm_add_member(ls, node);
+               log_debug(ls, "add member %d", node->nodeid);
        }
 
        list_for_each_entry(memb, &ls->ls_nodes, list) {
@@ -251,7 +581,6 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        ls->ls_low_nodeid = low;
 
        make_member_array(ls);
-       dlm_set_recover_status(ls, DLM_RS_NODES);
        *neg_out = neg;
 
        error = ping_members(ls);
@@ -261,12 +590,8 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
                ls->ls_members_result = error;
                complete(&ls->ls_members_done);
        }
-       if (error)
-               goto out;
 
-       error = dlm_recover_members_wait(ls);
- out:
-       log_debug(ls, "total members %d error %d", ls->ls_num_nodes, error);
+       log_debug(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
        return error;
 }
 
@@ -327,26 +652,35 @@ int dlm_ls_stop(struct dlm_ls *ls)
         */
 
        dlm_recoverd_suspend(ls);
+
+       spin_lock(&ls->ls_recover_lock);
+       kfree(ls->ls_slots);
+       ls->ls_slots = NULL;
+       ls->ls_num_slots = 0;
+       ls->ls_slots_size = 0;
        ls->ls_recover_status = 0;
+       spin_unlock(&ls->ls_recover_lock);
+
        dlm_recoverd_resume(ls);
 
        if (!ls->ls_recover_begin)
                ls->ls_recover_begin = jiffies;
+
+       dlm_lsop_recover_prep(ls);
        return 0;
 }
 
 int dlm_ls_start(struct dlm_ls *ls)
 {
        struct dlm_recover *rv = NULL, *rv_old;
-       int *ids = NULL, *new = NULL;
-       int error, ids_count = 0, new_count = 0;
+       struct dlm_config_node *nodes;
+       int error, count;
 
        rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS);
        if (!rv)
                return -ENOMEM;
 
-       error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
-                               &new, &new_count);
+       error = dlm_config_nodes(ls->ls_name, &nodes, &count);
        if (error < 0)
                goto fail;
 
@@ -361,10 +695,8 @@ int dlm_ls_start(struct dlm_ls *ls)
                goto fail;
        }
 
-       rv->nodeids = ids;
-       rv->node_count = ids_count;
-       rv->new = new;
-       rv->new_count = new_count;
+       rv->nodes = nodes;
+       rv->nodes_count = count;
        rv->seq = ++ls->ls_recover_seq;
        rv_old = ls->ls_recover_args;
        ls->ls_recover_args = rv;
@@ -372,9 +704,8 @@ int dlm_ls_start(struct dlm_ls *ls)
 
        if (rv_old) {
                log_error(ls, "unused recovery %llx %d",
-                         (unsigned long long)rv_old->seq, rv_old->node_count);
-               kfree(rv_old->nodeids);
-               kfree(rv_old->new);
+                         (unsigned long long)rv_old->seq, rv_old->nodes_count);
+               kfree(rv_old->nodes);
                kfree(rv_old);
        }
 
@@ -383,8 +714,7 @@ int dlm_ls_start(struct dlm_ls *ls)
 
  fail:
        kfree(rv);
-       kfree(ids);
-       kfree(new);
+       kfree(nodes);
        return error;
 }
 
index 7a26fca1e0b5386e6b5906655b57f6fd80e1fe3c..3deb70661c699c809e57e6ae588e03036ad62549 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 *******************************************************************************
 **
-**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2005-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -20,6 +20,14 @@ void dlm_clear_members_gone(struct dlm_ls *ls);
 int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
 int dlm_is_removed(struct dlm_ls *ls, int nodeid);
 int dlm_is_member(struct dlm_ls *ls, int nodeid);
+int dlm_slots_version(struct dlm_header *h);
+void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
+                  struct dlm_member *memb);
+void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc);
+int dlm_slots_copy_in(struct dlm_ls *ls);
+int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
+                    struct dlm_slot **slots_out, uint32_t *gen_out);
+void dlm_lsop_recover_done(struct dlm_ls *ls);
 
 #endif                          /* __MEMBER_DOT_H__ */
 
index f10a50f24e8fd6fc1889babedbf68aee0990472e..ac5c616c969643addc81c8920f4da2cf439356e9 100644 (file)
@@ -23,6 +23,7 @@
 #include "memory.h"
 #include "lock.h"
 #include "util.h"
+#include "member.h"
 
 
 static int rcom_response(struct dlm_ls *ls)
@@ -72,20 +73,30 @@ static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
        dlm_lowcomms_commit_buffer(mh);
 }
 
+static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs,
+                           uint32_t flags)
+{
+       rs->rs_flags = cpu_to_le32(flags);
+}
+
 /* When replying to a status request, a node also sends back its
    configuration values.  The requesting node then checks that the remote
    node is configured the same way as itself. */
 
-static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
+static void set_rcom_config(struct dlm_ls *ls, struct rcom_config *rf,
+                           uint32_t num_slots)
 {
        rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen);
        rf->rf_lsflags = cpu_to_le32(ls->ls_exflags);
+
+       rf->rf_our_slot = cpu_to_le16(ls->ls_slot);
+       rf->rf_num_slots = cpu_to_le16(num_slots);
+       rf->rf_generation =  cpu_to_le32(ls->ls_generation);
 }
 
-static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
 {
        struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
-       size_t conf_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
 
        if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
                log_error(ls, "version mismatch: %x nodeid %d: %x",
@@ -94,12 +105,6 @@ static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
                return -EPROTO;
        }
 
-       if (rc->rc_header.h_length < conf_size) {
-               log_error(ls, "config too short: %d nodeid %d",
-                         rc->rc_header.h_length, nodeid);
-               return -EPROTO;
-       }
-
        if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen ||
            le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) {
                log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
@@ -127,7 +132,18 @@ static void disallow_sync_reply(struct dlm_ls *ls)
        spin_unlock(&ls->ls_rcom_spin);
 }
 
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
+/*
+ * low nodeid gathers one slot value at a time from each node.
+ * it sets need_slots=0, and saves rf_our_slot returned from each
+ * rcom_config.
+ *
+ * other nodes gather all slot values at once from the low nodeid.
+ * they set need_slots=1, and ignore the rf_our_slot returned from each
+ * rcom_config.  they use the rf_num_slots returned from the low
+ * node's rcom_config.
+ */
+
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
 {
        struct dlm_rcom *rc;
        struct dlm_mhandle *mh;
@@ -141,10 +157,13 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
                goto out;
        }
 
-       error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
+       error = create_rcom(ls, nodeid, DLM_RCOM_STATUS,
+                           sizeof(struct rcom_status), &rc, &mh);
        if (error)
                goto out;
 
+       set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags);
+
        allow_sync_reply(ls, &rc->rc_id);
        memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
 
@@ -161,8 +180,11 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
                /* we pretend the remote lockspace exists with 0 status */
                log_debug(ls, "remote node %d not ready", nodeid);
                rc->rc_result = 0;
-       } else
-               error = check_config(ls, rc, nodeid);
+               error = 0;
+       } else {
+               error = check_rcom_config(ls, rc, nodeid);
+       }
+
        /* the caller looks at rc_result for the remote recovery status */
  out:
        return error;
@@ -172,17 +194,60 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
 {
        struct dlm_rcom *rc;
        struct dlm_mhandle *mh;
-       int error, nodeid = rc_in->rc_header.h_nodeid;
+       struct rcom_status *rs;
+       uint32_t status;
+       int nodeid = rc_in->rc_header.h_nodeid;
+       int len = sizeof(struct rcom_config);
+       int num_slots = 0;
+       int error;
+
+       if (!dlm_slots_version(&rc_in->rc_header)) {
+               status = dlm_recover_status(ls);
+               goto do_create;
+       }
+
+       rs = (struct rcom_status *)rc_in->rc_buf;
 
+       if (!(rs->rs_flags & DLM_RSF_NEED_SLOTS)) {
+               status = dlm_recover_status(ls);
+               goto do_create;
+       }
+
+       spin_lock(&ls->ls_recover_lock);
+       status = ls->ls_recover_status;
+       num_slots = ls->ls_num_slots;
+       spin_unlock(&ls->ls_recover_lock);
+       len += num_slots * sizeof(struct rcom_slot);
+
+ do_create:
        error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
-                           sizeof(struct rcom_config), &rc, &mh);
+                           len, &rc, &mh);
        if (error)
                return;
+
        rc->rc_id = rc_in->rc_id;
        rc->rc_seq_reply = rc_in->rc_seq;
-       rc->rc_result = dlm_recover_status(ls);
-       make_config(ls, (struct rcom_config *) rc->rc_buf);
+       rc->rc_result = status;
+
+       set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);
+
+       if (!num_slots)
+               goto do_send;
+
+       spin_lock(&ls->ls_recover_lock);
+       if (ls->ls_num_slots != num_slots) {
+               spin_unlock(&ls->ls_recover_lock);
+               log_debug(ls, "receive_rcom_status num_slots %d to %d",
+                         num_slots, ls->ls_num_slots);
+               rc->rc_result = 0;
+               set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, 0);
+               goto do_send;
+       }
+
+       dlm_slots_copy_out(ls, rc);
+       spin_unlock(&ls->ls_recover_lock);
 
+ do_send:
        send_rcom(ls, mh, rc);
 }
 
index b09abd29ba38cfc3ff64dd2666f3945054520c62..206723ab744dd1c30c687f3a82e50282f631e985 100644 (file)
@@ -14,7 +14,7 @@
 #ifndef __RCOM_DOT_H__
 #define __RCOM_DOT_H__
 
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags);
 int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
 int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
 int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
index 14638235f7b2935093e31e43a36a09435dc80c73..34d5adf1fce7d2022679d42e1e00673af0825dae 100644 (file)
@@ -85,14 +85,20 @@ uint32_t dlm_recover_status(struct dlm_ls *ls)
        return status;
 }
 
+static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
+{
+       ls->ls_recover_status |= status;
+}
+
 void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
 {
        spin_lock(&ls->ls_recover_lock);
-       ls->ls_recover_status |= status;
+       _set_recover_status(ls, status);
        spin_unlock(&ls->ls_recover_lock);
 }
 
-static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
+static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
+                          int save_slots)
 {
        struct dlm_rcom *rc = ls->ls_recover_buf;
        struct dlm_member *memb;
@@ -106,10 +112,13 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
                                goto out;
                        }
 
-                       error = dlm_rcom_status(ls, memb->nodeid);
+                       error = dlm_rcom_status(ls, memb->nodeid, 0);
                        if (error)
                                goto out;
 
+                       if (save_slots)
+                               dlm_slot_save(ls, rc, memb);
+
                        if (rc->rc_result & wait_status)
                                break;
                        if (delay < 1000)
@@ -121,7 +130,8 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
        return error;
 }
 
-static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
+static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
+                          uint32_t status_flags)
 {
        struct dlm_rcom *rc = ls->ls_recover_buf;
        int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
@@ -132,7 +142,7 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
                        goto out;
                }
 
-               error = dlm_rcom_status(ls, nodeid);
+               error = dlm_rcom_status(ls, nodeid, status_flags);
                if (error)
                        break;
 
@@ -152,18 +162,56 @@ static int wait_status(struct dlm_ls *ls, uint32_t status)
        int error;
 
        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
-               error = wait_status_all(ls, status);
+               error = wait_status_all(ls, status, 0);
                if (!error)
                        dlm_set_recover_status(ls, status_all);
        } else
-               error = wait_status_low(ls, status_all);
+               error = wait_status_low(ls, status_all, 0);
 
        return error;
 }
 
 int dlm_recover_members_wait(struct dlm_ls *ls)
 {
-       return wait_status(ls, DLM_RS_NODES);
+       struct dlm_member *memb;
+       struct dlm_slot *slots;
+       int num_slots, slots_size;
+       int error, rv;
+       uint32_t gen;
+
+       list_for_each_entry(memb, &ls->ls_nodes, list) {
+               memb->slot = -1;
+               memb->generation = 0;
+       }
+
+       if (ls->ls_low_nodeid == dlm_our_nodeid()) {
+               error = wait_status_all(ls, DLM_RS_NODES, 1);
+               if (error)
+                       goto out;
+
+               /* slots array is sparse, slots_size may be > num_slots */
+
+               rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
+               if (!rv) {
+                       spin_lock(&ls->ls_recover_lock);
+                       _set_recover_status(ls, DLM_RS_NODES_ALL);
+                       ls->ls_num_slots = num_slots;
+                       ls->ls_slots_size = slots_size;
+                       ls->ls_slots = slots;
+                       ls->ls_generation = gen;
+                       spin_unlock(&ls->ls_recover_lock);
+               } else {
+                       dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
+               }
+       } else {
+               error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
+               if (error)
+                       goto out;
+
+               dlm_slots_copy_in(ls);
+       }
+ out:
+       return error;
 }
 
 int dlm_recover_directory_wait(struct dlm_ls *ls)
@@ -542,8 +590,6 @@ int dlm_recover_locks(struct dlm_ls *ls)
  out:
        if (error)
                recover_list_clear(ls);
-       else
-               dlm_set_recover_status(ls, DLM_RS_LOCKS);
        return error;
 }
 
@@ -715,6 +761,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
 
 int dlm_create_root_list(struct dlm_ls *ls)
 {
+       struct rb_node *n;
        struct dlm_rsb *r;
        int i, error = 0;
 
@@ -727,7 +774,8 @@ int dlm_create_root_list(struct dlm_ls *ls)
 
        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
-               list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
+               for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
+                       r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }
@@ -741,7 +789,8 @@ int dlm_create_root_list(struct dlm_ls *ls)
                        continue;
                }
 
-               list_for_each_entry(r, &ls->ls_rsbtbl[i].toss, res_hashchain) {
+               for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
+                       r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }
@@ -771,16 +820,18 @@ void dlm_release_root_list(struct dlm_ls *ls)
 
 void dlm_clear_toss_list(struct dlm_ls *ls)
 {
-       struct dlm_rsb *r, *safe;
+       struct rb_node *n, *next;
+       struct dlm_rsb *rsb;
        int i;
 
        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
-               list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
-                                        res_hashchain) {
-                       if (dlm_no_directory(ls) || !is_master(r)) {
-                               list_del(&r->res_hashchain);
-                               dlm_free_rsb(r);
+               for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
+                       next = rb_next(n);
+                       rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+                       if (dlm_no_directory(ls) || !is_master(rsb)) {
+                               rb_erase(n, &ls->ls_rsbtbl[i].toss);
+                               dlm_free_rsb(rsb);
                        }
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
index 774da3cf92c6b095d99538f971daa21577897750..3780caf7ae0c239776951724c8d951dca5aef5dd 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -54,7 +54,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
        unsigned long start;
        int error, neg = 0;
 
-       log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
+       log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq);
 
        mutex_lock(&ls->ls_recoverd_active);
 
@@ -76,14 +76,22 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
        /*
         * Add or remove nodes from the lockspace's ls_nodes list.
-        * Also waits for all nodes to complete dlm_recover_members.
         */
 
        error = dlm_recover_members(ls, rv, &neg);
        if (error) {
-               log_debug(ls, "recover_members failed %d", error);
+               log_debug(ls, "dlm_recover_members error %d", error);
                goto fail;
        }
+
+       dlm_set_recover_status(ls, DLM_RS_NODES);
+
+       error = dlm_recover_members_wait(ls);
+       if (error) {
+               log_debug(ls, "dlm_recover_members_wait error %d", error);
+               goto fail;
+       }
+
        start = jiffies;
 
        /*
@@ -93,17 +101,15 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
        error = dlm_recover_directory(ls);
        if (error) {
-               log_debug(ls, "recover_directory failed %d", error);
+               log_debug(ls, "dlm_recover_directory error %d", error);
                goto fail;
        }
 
-       /*
-        * Wait for all nodes to complete directory rebuild.
-        */
+       dlm_set_recover_status(ls, DLM_RS_DIR);
 
        error = dlm_recover_directory_wait(ls);
        if (error) {
-               log_debug(ls, "recover_directory_wait failed %d", error);
+               log_debug(ls, "dlm_recover_directory_wait error %d", error);
                goto fail;
        }
 
@@ -133,7 +139,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
                error = dlm_recover_masters(ls);
                if (error) {
-                       log_debug(ls, "recover_masters failed %d", error);
+                       log_debug(ls, "dlm_recover_masters error %d", error);
                        goto fail;
                }
 
@@ -143,13 +149,15 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
                error = dlm_recover_locks(ls);
                if (error) {
-                       log_debug(ls, "recover_locks failed %d", error);
+                       log_debug(ls, "dlm_recover_locks error %d", error);
                        goto fail;
                }
 
+               dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
                error = dlm_recover_locks_wait(ls);
                if (error) {
-                       log_debug(ls, "recover_locks_wait failed %d", error);
+                       log_debug(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }
 
@@ -170,7 +178,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
                error = dlm_recover_locks_wait(ls);
                if (error) {
-                       log_debug(ls, "recover_locks_wait failed %d", error);
+                       log_debug(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }
        }
@@ -186,9 +194,10 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
        dlm_purge_requestqueue(ls);
 
        dlm_set_recover_status(ls, DLM_RS_DONE);
+
        error = dlm_recover_done_wait(ls);
        if (error) {
-               log_debug(ls, "recover_done_wait failed %d", error);
+               log_debug(ls, "dlm_recover_done_wait error %d", error);
                goto fail;
        }
 
@@ -200,34 +209,35 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
        error = enable_locking(ls, rv->seq);
        if (error) {
-               log_debug(ls, "enable_locking failed %d", error);
+               log_debug(ls, "enable_locking error %d", error);
                goto fail;
        }
 
        error = dlm_process_requestqueue(ls);
        if (error) {
-               log_debug(ls, "process_requestqueue failed %d", error);
+               log_debug(ls, "dlm_process_requestqueue error %d", error);
                goto fail;
        }
 
        error = dlm_recover_waiters_post(ls);
        if (error) {
-               log_debug(ls, "recover_waiters_post failed %d", error);
+               log_debug(ls, "dlm_recover_waiters_post error %d", error);
                goto fail;
        }
 
        dlm_grant_after_purge(ls);
 
-       log_debug(ls, "recover %llx done: %u ms",
-                 (unsigned long long)rv->seq,
+       log_debug(ls, "dlm_recover %llx generation %u done: %u ms",
+                 (unsigned long long)rv->seq, ls->ls_generation,
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);
 
+       dlm_lsop_recover_done(ls);
        return 0;
 
  fail:
        dlm_release_root_list(ls);
-       log_debug(ls, "recover %llx error %d",
+       log_debug(ls, "dlm_recover %llx error %d",
                  (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
        return error;
@@ -250,8 +260,7 @@ static void do_ls_recovery(struct dlm_ls *ls)
 
        if (rv) {
                ls_recover(ls, rv);
-               kfree(rv->nodeids);
-               kfree(rv->new);
+               kfree(rv->nodes);
                kfree(rv);
        }
 }
index d8ea607564034a87bbc5e9614f5fdff6db347d4b..eb4ed9ba3098198e8d1db357db48c8f6e9e2ff34 100644 (file)
@@ -392,8 +392,9 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       error = dlm_new_lockspace(params->name, strlen(params->name),
-                                 &lockspace, params->flags, DLM_USER_LVB_LEN);
+       error = dlm_new_lockspace(params->name, NULL, params->flags,
+                                 DLM_USER_LVB_LEN, NULL, NULL, NULL,
+                                 &lockspace);
        if (error)
                return error;
 
index 3f64b9f26e7df51f24254f2885d67c1f54648923..aeb135c7ff5c0c1a6f1a1dfe3b5f2018dfbd6731 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -59,6 +59,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
+
+#include <trace/events/task.h>
 #include "internal.h"
 
 int core_uses_pid;
@@ -1054,6 +1056,8 @@ void set_task_comm(struct task_struct *tsk, char *buf)
 {
        task_lock(tsk);
 
+       trace_task_rename(tsk, buf);
+
        /*
         * Threads may access current->comm without holding
         * the task lock, so write the string carefully.
index 12ccacda44e0288e13247e3e79ebd414287eb548..f9e2cd8cf711d2f43a74f5a5f4cc830604cd82d9 100644 (file)
@@ -23,6 +23,8 @@
 
 #include <trace/events/ext4.h>
 
+static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+                                           ext4_group_t block_group);
 /*
  * balloc.c contains the blocks allocation and deallocation routines
  */
@@ -668,7 +670,7 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
  * This function returns the number of file system metadata clusters at
  * the beginning of a block group, including the reserved gdt blocks.
  */
-unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                     ext4_group_t block_group)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
index 1554b15f91bce81f0a7b43e13bba1c738361a668..513004fc3d840ee03586a4fedcdb133d8031c642 100644 (file)
@@ -511,6 +511,14 @@ struct ext4_new_group_data {
        __u32 free_blocks_count;
 };
 
+/* Indexes used to index group tables in ext4_new_group_data */
+enum {
+       BLOCK_BITMAP = 0,       /* block bitmap */
+       INODE_BITMAP,           /* inode bitmap */
+       INODE_TABLE,            /* inode tables */
+       GROUP_TABLE_COUNT,
+};
+
 /*
  * Flags used by ext4_map_blocks()
  */
@@ -575,6 +583,7 @@ struct ext4_new_group_data {
  /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
 #define EXT4_IOC_ALLOC_DA_BLKS         _IO('f', 12)
 #define EXT4_IOC_MOVE_EXT              _IOWR('f', 15, struct move_extent)
+#define EXT4_IOC_RESIZE_FS             _IOW('f', 16, __u64)
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -957,12 +966,13 @@ struct ext4_inode_info {
 #define test_opt2(sb, opt)             (EXT4_SB(sb)->s_mount_opt2 & \
                                         EXT4_MOUNT2_##opt)
 
-#define ext4_set_bit                   __test_and_set_bit_le
+#define ext4_test_and_set_bit          __test_and_set_bit_le
+#define ext4_set_bit                   __set_bit_le
 #define ext4_set_bit_atomic            ext2_set_bit_atomic
-#define ext4_clear_bit                 __test_and_clear_bit_le
+#define ext4_test_and_clear_bit                __test_and_clear_bit_le
+#define ext4_clear_bit                 __clear_bit_le
 #define ext4_clear_bit_atomic          ext2_clear_bit_atomic
 #define ext4_test_bit                  test_bit_le
-#define ext4_find_first_zero_bit       find_first_zero_bit_le
 #define ext4_find_next_zero_bit                find_next_zero_bit_le
 #define ext4_find_next_bit             find_next_bit_le
 
@@ -1397,6 +1407,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE     0x0040
 #define EXT4_FEATURE_RO_COMPAT_QUOTA           0x0100
 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC                0x0200
+#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
@@ -1409,6 +1420,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_FLEX_BG          0x0200
 #define EXT4_FEATURE_INCOMPAT_EA_INODE         0x0400 /* EA in inode */
 #define EXT4_FEATURE_INCOMPAT_DIRDATA          0x1000 /* data in dirent */
+#define EXT4_FEATURE_INCOMPAT_INLINEDATA       0x2000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000 /* >2GB or 3-lvl htree */
 
 #define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1790,8 +1803,6 @@ extern void ext4_init_block_bitmap(struct super_block *sb,
 extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                              ext4_group_t block_group,
                                              struct ext4_group_desc *gdp);
-extern unsigned ext4_num_base_meta_clusters(struct super_block *sb,
-                                           ext4_group_t block_group);
 extern unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp);
@@ -1880,16 +1891,9 @@ extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
-extern int ext4_block_truncate_page(handle_t *handle,
-               struct address_space *mapping, loff_t from);
-extern int ext4_block_zero_page_range(handle_t *handle,
-               struct address_space *mapping, loff_t from, loff_t length);
 extern int ext4_discard_partial_page_buffers(handle_t *handle,
                struct address_space *mapping, loff_t from,
                loff_t length, int flags);
-extern int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
-               struct inode *inode, struct page *page, loff_t from,
-               loff_t length, int flags);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -1924,6 +1928,7 @@ extern int ext4_group_add(struct super_block *sb,
 extern int ext4_group_extend(struct super_block *sb,
                                struct ext4_super_block *es,
                                ext4_fsblk_t n_blocks_count);
+extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
index 841faf5fb785058477ba59b205fe0e55dd1c0f0d..74f23c292e1b3000bb7bd9cf0e953df27488b698 100644 (file)
@@ -3280,6 +3280,9 @@ static int ext4_find_delalloc_range(struct inode *inode,
        ext4_lblk_t i, pg_lblk;
        pgoff_t index;
 
+       if (!test_opt(inode->i_sb, DELALLOC))
+               return 0;
+
        /* reverse search wont work if fs block size is less than page size */
        if (inode->i_blkbits < PAGE_CACHE_SHIFT)
                search_hint_reverse = 0;
@@ -3452,8 +3455,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
        int err = 0;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
 
-       ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
-                 "block %llu, max_blocks %u, flags %d, allocated %u",
+       ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
+                 "block %llu, max_blocks %u, flags %x, allocated %u\n",
                  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);
@@ -3624,7 +3627,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
        ext4_lblk_t ex_cluster_start, ex_cluster_end;
-       ext4_lblk_t rr_cluster_start, rr_cluster_end;
+       ext4_lblk_t rr_cluster_start;
        ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
        ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
        unsigned short ee_len = ext4_ext_get_actual_len(ex);
@@ -3635,7 +3638,6 @@ static int get_implied_cluster_alloc(struct super_block *sb,
 
        /* The requested region passed into ext4_map_blocks() */
        rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
-       rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);
 
        if ((rr_cluster_start == ex_cluster_end) ||
            (rr_cluster_start == ex_cluster_start)) {
index 4637af036d9c8ae67e1a93dacc9607b3086d68f0..25d8c9781ad94ea758781f906a34412743b3cda0 100644 (file)
@@ -252,7 +252,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                fatal = ext4_journal_get_write_access(handle, bh2);
        }
        ext4_lock_group(sb, block_group);
-       cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+       cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
        if (fatal || !cleared) {
                ext4_unlock_group(sb, block_group);
                goto out;
@@ -358,7 +358,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
-       unsigned int freei, avefreei;
+       unsigned int freei, avefreei, grp_free;
        ext4_fsblk_t freeb, avefreec;
        unsigned int ndirs;
        int max_dirs, min_inodes;
@@ -477,8 +477,8 @@ fallback_retry:
        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
-               if (desc && ext4_free_inodes_count(sb, desc) &&
-                   ext4_free_inodes_count(sb, desc) >= avefreei) {
+               grp_free = ext4_free_inodes_count(sb, desc);
+               if (desc && grp_free && grp_free >= avefreei) {
                        *group = grp;
                        return 0;
                }
@@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super_block *sb,
         */
        down_read(&grp->alloc_sem);
        ext4_lock_group(sb, group);
-       if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
+       if (ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
                goto err_ret;
@@ -885,8 +885,12 @@ got:
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
-               err = -EINVAL;
-               goto fail_drop;
+               /*
+                * Likely a bitmap corruption causing inode to be allocated
+                * twice.
+                */
+               err = -EIO;
+               goto fail;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
index aa8efa6572d6d835f5be2a4867d81441a71bb19f..feaa82fe629d067e0900744fcbb50da2768b0183 100644 (file)
@@ -71,6 +71,9 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+               struct inode *inode, struct page *page, loff_t from,
+               loff_t length, int flags);
 
 /*
  * Test whether an inode is a fast symlink.
@@ -2759,7 +2762,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        if (!io_end || !size)
                goto out;
 
-       ext_debug("ext4_end_io_dio(): io_end 0x%p"
+       ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
@@ -3160,7 +3163,7 @@ int ext4_discard_partial_page_buffers(handle_t *handle,
  *
  * Returns zero on sucess or negative on failure.
  */
-int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
                struct inode *inode, struct page *page, loff_t from,
                loff_t length, int flags)
 {
@@ -3300,126 +3303,6 @@ next:
        return err;
 }
 
-/*
- * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
-int ext4_block_truncate_page(handle_t *handle,
-               struct address_space *mapping, loff_t from)
-{
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned length;
-       unsigned blocksize;
-       struct inode *inode = mapping->host;
-
-       blocksize = inode->i_sb->s_blocksize;
-       length = blocksize - (offset & (blocksize - 1));
-
-       return ext4_block_zero_page_range(handle, mapping, from, length);
-}
-
-/*
- * ext4_block_zero_page_range() zeros out a mapping of length 'length'
- * starting from file offset 'from'.  The range to be zero'd must
- * be contained with in one block.  If the specified range exceeds
- * the end of the block it will be shortened to end of the block
- * that cooresponds to 'from'
- */
-int ext4_block_zero_page_range(handle_t *handle,
-               struct address_space *mapping, loff_t from, loff_t length)
-{
-       ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned blocksize, max, pos;
-       ext4_lblk_t iblock;
-       struct inode *inode = mapping->host;
-       struct buffer_head *bh;
-       struct page *page;
-       int err = 0;
-
-       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
-                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (!page)
-               return -ENOMEM;
-
-       blocksize = inode->i_sb->s_blocksize;
-       max = blocksize - (offset & (blocksize - 1));
-
-       /*
-        * correct length if it does not fall between
-        * 'from' and the end of the block
-        */
-       if (length > max || length < 0)
-               length = max;
-
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
-       if (!page_has_buffers(page))
-               create_empty_buffers(page, blocksize, 0);
-
-       /* Find the buffer that contains "offset" */
-       bh = page_buffers(page);
-       pos = blocksize;
-       while (offset >= pos) {
-               bh = bh->b_this_page;
-               iblock++;
-               pos += blocksize;
-       }
-
-       err = 0;
-       if (buffer_freed(bh)) {
-               BUFFER_TRACE(bh, "freed: skip");
-               goto unlock;
-       }
-
-       if (!buffer_mapped(bh)) {
-               BUFFER_TRACE(bh, "unmapped");
-               ext4_get_block(inode, iblock, bh, 0);
-               /* unmapped? It's a hole - nothing to do */
-               if (!buffer_mapped(bh)) {
-                       BUFFER_TRACE(bh, "still unmapped");
-                       goto unlock;
-               }
-       }
-
-       /* Ok, it's mapped. Make sure it's up-to-date */
-       if (PageUptodate(page))
-               set_buffer_uptodate(bh);
-
-       if (!buffer_uptodate(bh)) {
-               err = -EIO;
-               ll_rw_block(READ, 1, &bh);
-               wait_on_buffer(bh);
-               /* Uhhuh. Read error. Complain and punt. */
-               if (!buffer_uptodate(bh))
-                       goto unlock;
-       }
-
-       if (ext4_should_journal_data(inode)) {
-               BUFFER_TRACE(bh, "get write access");
-               err = ext4_journal_get_write_access(handle, bh);
-               if (err)
-                       goto unlock;
-       }
-
-       zero_user(page, offset, length);
-
-       BUFFER_TRACE(bh, "zeroed end of block");
-
-       err = 0;
-       if (ext4_should_journal_data(inode)) {
-               err = ext4_handle_dirty_metadata(handle, inode, bh);
-       } else
-               mark_buffer_dirty(bh);
-
-unlock:
-       unlock_page(page);
-       page_cache_release(page);
-       return err;
-}
-
 int ext4_can_truncate(struct inode *inode)
 {
        if (S_ISREG(inode->i_mode))
@@ -4646,9 +4529,19 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
                return 0;
        if (is_journal_aborted(journal))
                return -EROFS;
+       /* We have to allocate physical blocks for delalloc blocks
+        * before flushing journal. otherwise delalloc blocks can not
+        * be allocated any more. even more truncate on delalloc blocks
+        * could trigger BUG by flushing delalloc blocks in journal.
+        * There is no delalloc block in non-journal data mode.
+        */
+       if (val && test_opt(inode->i_sb, DELALLOC)) {
+               err = ext4_alloc_da_blocks(inode);
+               if (err < 0)
+                       return err;
+       }
 
        jbd2_journal_lock_updates(journal);
-       jbd2_journal_flush(journal);
 
        /*
         * OK, there are no updates running now, and all cached data is
@@ -4660,8 +4553,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 
        if (val)
                ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
-       else
+       else {
+               jbd2_journal_flush(journal);
                ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+       }
        ext4_set_aops(inode);
 
        jbd2_journal_unlock_updates(journal);
index e87a932b073bcf7db1916db8ef1ac469c676ca63..6eee25591b8159bc96d35a16f94f94c0855a35b9 100644 (file)
@@ -18,6 +18,8 @@
 #include "ext4_jbd2.h"
 #include "ext4.h"
 
+#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
+
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct inode *inode = filp->f_dentry->d_inode;
@@ -186,19 +188,22 @@ setversion_out:
                if (err)
                        return err;
 
-               if (get_user(n_blocks_count, (__u32 __user *)arg))
-                       return -EFAULT;
+               if (get_user(n_blocks_count, (__u32 __user *)arg)) {
+                       err = -EFAULT;
+                       goto group_extend_out;
+               }
 
                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not supported with bigalloc");
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       goto group_extend_out;
                }
 
                err = mnt_want_write_file(filp);
                if (err)
-                       return err;
+                       goto group_extend_out;
 
                err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
                if (EXT4_SB(sb)->s_journal) {
@@ -209,8 +214,8 @@ setversion_out:
                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
+group_extend_out:
                ext4_resize_end(sb);
-
                return err;
        }
 
@@ -251,8 +256,7 @@ setversion_out:
                err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
-               if (me.moved_len > 0)
-                       file_remove_suid(donor_filp);
+               mnt_drop_write(filp->f_path.mnt);
 
                if (copy_to_user((struct move_extent __user *)arg,
                                 &me, sizeof(me)))
@@ -271,19 +275,22 @@ mext_out:
                        return err;
 
                if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
-                               sizeof(input)))
-                       return -EFAULT;
+                               sizeof(input))) {
+                       err = -EFAULT;
+                       goto group_add_out;
+               }
 
                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not supported with bigalloc");
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       goto group_add_out;
                }
 
                err = mnt_want_write_file(filp);
                if (err)
-                       return err;
+                       goto group_add_out;
 
                err = ext4_group_add(sb, &input);
                if (EXT4_SB(sb)->s_journal) {
@@ -294,8 +301,8 @@ mext_out:
                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
+group_add_out:
                ext4_resize_end(sb);
-
                return err;
        }
 
@@ -335,6 +342,60 @@ mext_out:
                return err;
        }
 
+       case EXT4_IOC_RESIZE_FS: {
+               ext4_fsblk_t n_blocks_count;
+               struct super_block *sb = inode->i_sb;
+               int err = 0, err2 = 0;
+
+               if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                              EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+                       ext4_msg(sb, KERN_ERR,
+                                "Online resizing not (yet) supported with bigalloc");
+                       return -EOPNOTSUPP;
+               }
+
+               if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+                              EXT4_FEATURE_INCOMPAT_META_BG)) {
+                       ext4_msg(sb, KERN_ERR,
+                                "Online resizing not (yet) supported with meta_bg");
+                       return -EOPNOTSUPP;
+               }
+
+               if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
+                                  sizeof(__u64))) {
+                       return -EFAULT;
+               }
+
+               if (n_blocks_count > MAX_32_NUM &&
+                   !EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                              EXT4_FEATURE_INCOMPAT_64BIT)) {
+                       ext4_msg(sb, KERN_ERR,
+                                "File system only supports 32-bit block numbers");
+                       return -EOPNOTSUPP;
+               }
+
+               err = ext4_resize_begin(sb);
+               if (err)
+                       return err;
+
+               err = mnt_want_write(filp->f_path.mnt);
+               if (err)
+                       goto resizefs_out;
+
+               err = ext4_resize_fs(sb, n_blocks_count);
+               if (EXT4_SB(sb)->s_journal) {
+                       jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+                       err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+                       jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+               }
+               if (err == 0)
+                       err = err2;
+               mnt_drop_write(filp->f_path.mnt);
+resizefs_out:
+               ext4_resize_end(sb);
+               return err;
+       }
+
        case FITRIM:
        {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
@@ -433,6 +494,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
        case EXT4_IOC_MOVE_EXT:
        case FITRIM:
+       case EXT4_IOC_RESIZE_FS:
                break;
        default:
                return -ENOIOCTLCMD;
index e2d8be8f28bfb8555644bef5100b2f9b8c2cbe9a..cb990b21c698bd9dd1ec0e4bb8488f6e82bbe2f7 100644 (file)
@@ -3671,7 +3671,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       trace_ext4_mb_release_group_pa(pa);
+       trace_ext4_mb_release_group_pa(sb, pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
index 996780ab4f4e83cdfc114e83ab8cad35242f4813..f9d948f0eb861f08de3ef7b589b401846dc627f6 100644 (file)
@@ -134,6 +134,172 @@ static int verify_group_input(struct super_block *sb,
        return err;
 }
 
+/*
+ * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex
+ * group each time.
+ */
+struct ext4_new_flex_group_data {
+       struct ext4_new_group_data *groups;     /* new_group_data for groups
+                                                  in the flex group */
+       __u16 *bg_flags;                        /* block group flags of groups
+                                                  in @groups */
+       ext4_group_t count;                     /* number of groups in @groups
+                                                */
+};
+
+/*
+ * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
+ * @flexbg_size.
+ *
+ * Returns NULL on failure otherwise address of the allocated structure.
+ */
+static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+{
+       struct ext4_new_flex_group_data *flex_gd;
+
+       flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
+       if (flex_gd == NULL)
+               goto out3;
+
+       flex_gd->count = flexbg_size;
+
+       flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
+                                 flexbg_size, GFP_NOFS);
+       if (flex_gd->groups == NULL)
+               goto out2;
+
+       flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
+       if (flex_gd->bg_flags == NULL)
+               goto out1;
+
+       return flex_gd;
+
+out1:
+       kfree(flex_gd->groups);
+out2:
+       kfree(flex_gd);
+out3:
+       return NULL;
+}
+
+static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
+{
+       kfree(flex_gd->bg_flags);
+       kfree(flex_gd->groups);
+       kfree(flex_gd);
+}
+
+/*
+ * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
+ * and inode tables for a flex group.
+ *
+ * This function is used by 64bit-resize.  Note that this function allocates
+ * group tables from the 1st group of groups contained by @flexgd, which may
+ * be a partial of a flex group.
+ *
+ * @sb: super block of fs to which the groups belongs
+ */
+static void ext4_alloc_group_tables(struct super_block *sb,
+                               struct ext4_new_flex_group_data *flex_gd,
+                               int flexbg_size)
+{
+       struct ext4_new_group_data *group_data = flex_gd->groups;
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       ext4_fsblk_t start_blk;
+       ext4_fsblk_t last_blk;
+       ext4_group_t src_group;
+       ext4_group_t bb_index = 0;
+       ext4_group_t ib_index = 0;
+       ext4_group_t it_index = 0;
+       ext4_group_t group;
+       ext4_group_t last_group;
+       unsigned overhead;
+
+       BUG_ON(flex_gd->count == 0 || group_data == NULL);
+
+       src_group = group_data[0].group;
+       last_group  = src_group + flex_gd->count - 1;
+
+       BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
+              (last_group & ~(flexbg_size - 1))));
+next_group:
+       group = group_data[0].group;
+       start_blk = ext4_group_first_block_no(sb, src_group);
+       last_blk = start_blk + group_data[src_group - group].blocks_count;
+
+       overhead = ext4_bg_has_super(sb, src_group) ?
+                  (1 + ext4_bg_num_gdb(sb, src_group) +
+                   le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+
+       start_blk += overhead;
+
+       BUG_ON(src_group >= group_data[0].group + flex_gd->count);
+       /* We collect contiguous blocks as much as possible. */
+       src_group++;
+       for (; src_group <= last_group; src_group++)
+               if (!ext4_bg_has_super(sb, src_group))
+                       last_blk += group_data[src_group - group].blocks_count;
+               else
+                       break;
+
+       /* Allocate block bitmaps */
+       for (; bb_index < flex_gd->count; bb_index++) {
+               if (start_blk >= last_blk)
+                       goto next_group;
+               group_data[bb_index].block_bitmap = start_blk++;
+               ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL);
+               group -= group_data[0].group;
+               group_data[group].free_blocks_count--;
+               if (flexbg_size > 1)
+                       flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+       }
+
+       /* Allocate inode bitmaps */
+       for (; ib_index < flex_gd->count; ib_index++) {
+               if (start_blk >= last_blk)
+                       goto next_group;
+               group_data[ib_index].inode_bitmap = start_blk++;
+               ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL);
+               group -= group_data[0].group;
+               group_data[group].free_blocks_count--;
+               if (flexbg_size > 1)
+                       flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+       }
+
+       /* Allocate inode tables */
+       for (; it_index < flex_gd->count; it_index++) {
+               if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
+                       goto next_group;
+               group_data[it_index].inode_table = start_blk;
+               ext4_get_group_no_and_offset(sb, start_blk, &group, NULL);
+               group -= group_data[0].group;
+               group_data[group].free_blocks_count -=
+                                       EXT4_SB(sb)->s_itb_per_group;
+               if (flexbg_size > 1)
+                       flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+
+               start_blk += EXT4_SB(sb)->s_itb_per_group;
+       }
+
+       if (test_opt(sb, DEBUG)) {
+               int i;
+               group = group_data[0].group;
+
+               printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
+                      "%d groups, flexbg size is %d:\n", flex_gd->count,
+                      flexbg_size);
+
+               for (i = 0; i < flex_gd->count; i++) {
+                       printk(KERN_DEBUG "adding %s group %u: %u "
+                              "blocks (%d free)\n",
+                              ext4_bg_has_super(sb, group + i) ? "normal" :
+                              "no-super", group + i,
+                              group_data[i].blocks_count,
+                              group_data[i].free_blocks_count);
+               }
+       }
+}
+
 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
                                  ext4_fsblk_t blk)
 {
@@ -179,131 +345,250 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
 }
 
 /*
- * Set up the block and inode bitmaps, and the inode table for the new group.
+ * set_flexbg_block_bitmap() marks @count blocks starting from @block used.
+ *
+ * Helper function for setup_new_flex_group_blocks() which marks the blocks
+ *
+ * @sb: super block
+ * @handle: journal handle
+ * @flex_gd: flex group data
+ */
+static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+                       struct ext4_new_flex_group_data *flex_gd,
+                       ext4_fsblk_t block, ext4_group_t count)
+{
+       ext4_group_t count2;
+
+       ext4_debug("mark blocks [%llu/%u] used\n", block, count);
+       for (count2 = count; count > 0; count -= count2, block += count2) {
+               ext4_fsblk_t start;
+               struct buffer_head *bh;
+               ext4_group_t group;
+               int err;
+
+               ext4_get_group_no_and_offset(sb, block, &group, NULL);
+               start = ext4_group_first_block_no(sb, group);
+               group -= flex_gd->groups[0].group;
+
+               count2 = sb->s_blocksize * 8 - (block - start);
+               if (count2 > count)
+                       count2 = count;
+
+               if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
+                       BUG_ON(flex_gd->count > 1);
+                       continue;
+               }
+
+               err = extend_or_restart_transaction(handle, 1);
+               if (err)
+                       return err;
+
+               bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
+               if (!bh)
+                       return -EIO;
+
+               err = ext4_journal_get_write_access(handle, bh);
+               if (err)
+                       return err;
+               ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
+                          block - start, count2);
+               ext4_set_bits(bh->b_data, block - start, count2);
+
+               err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               if (unlikely(err))
+                       return err;
+               brelse(bh);
+       }
+
+       return 0;
+}
+
+/*
+ * Set up the block and inode bitmaps, and the inode table for the new groups.
  * This doesn't need to be part of the main transaction, since we are only
  * changing blocks outside the actual filesystem.  We still do journaling to
  * ensure the recovery is correct in case of a failure just after resize.
  * If any part of this fails, we simply abort the resize.
+ *
+ * setup_new_flex_group_blocks handles a flex group as follow:
+ *  1. copy super block and GDT, and initialize group tables if necessary.
+ *     In this step, we only set bits in blocks bitmaps for blocks taken by
+ *     super block and GDT.
+ *  2. allocate group tables in block bitmaps, that is, set bits in block
+ *     bitmap for blocks taken by group tables.
  */
-static int setup_new_group_blocks(struct super_block *sb,
-                                 struct ext4_new_group_data *input)
+static int setup_new_flex_group_blocks(struct super_block *sb,
+                               struct ext4_new_flex_group_data *flex_gd)
 {
+       int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
+       ext4_fsblk_t start;
+       ext4_fsblk_t block;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
-       int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
-               le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
-       unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
-       struct buffer_head *bh;
+       struct ext4_super_block *es = sbi->s_es;
+       struct ext4_new_group_data *group_data = flex_gd->groups;
+       __u16 *bg_flags = flex_gd->bg_flags;
        handle_t *handle;
-       ext4_fsblk_t block;
-       ext4_grpblk_t bit;
-       int i;
-       int err = 0, err2;
+       ext4_group_t group, count;
+       struct buffer_head *bh = NULL;
+       int reserved_gdb, i, j, err = 0, err2;
+
+       BUG_ON(!flex_gd->count || !group_data ||
+              group_data[0].group != sbi->s_groups_count);
+
+       reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
 
        /* This transaction may be extended/restarted along the way */
        handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
-
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
-       BUG_ON(input->group != sbi->s_groups_count);
+       group = group_data[0].group;
+       for (i = 0; i < flex_gd->count; i++, group++) {
+               unsigned long gdblocks;
 
-       /* Copy all of the GDT blocks into the backup in this group */
-       for (i = 0, bit = 1, block = start + 1;
-            i < gdblocks; i++, block++, bit++) {
-               struct buffer_head *gdb;
+               gdblocks = ext4_bg_num_gdb(sb, group);
+               start = ext4_group_first_block_no(sb, group);
 
-               ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
-               err = extend_or_restart_transaction(handle, 1);
-               if (err)
-                       goto exit_journal;
+               /* Copy all of the GDT blocks into the backup in this group */
+               for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
+                       struct buffer_head *gdb;
 
-               gdb = sb_getblk(sb, block);
-               if (!gdb) {
-                       err = -EIO;
-                       goto exit_journal;
-               }
-               if ((err = ext4_journal_get_write_access(handle, gdb))) {
+                       ext4_debug("update backup group %#04llx\n", block);
+                       err = extend_or_restart_transaction(handle, 1);
+                       if (err)
+                               goto out;
+
+                       gdb = sb_getblk(sb, block);
+                       if (!gdb) {
+                               err = -EIO;
+                               goto out;
+                       }
+
+                       err = ext4_journal_get_write_access(handle, gdb);
+                       if (err) {
+                               brelse(gdb);
+                               goto out;
+                       }
+                       memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+                              gdb->b_size);
+                       set_buffer_uptodate(gdb);
+
+                       err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+                       if (unlikely(err)) {
+                               brelse(gdb);
+                               goto out;
+                       }
                        brelse(gdb);
-                       goto exit_journal;
                }
-               memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
-               set_buffer_uptodate(gdb);
-               err = ext4_handle_dirty_metadata(handle, NULL, gdb);
-               if (unlikely(err)) {
-                       brelse(gdb);
-                       goto exit_journal;
+
+               /* Zero out all of the reserved backup group descriptor
+                * table blocks
+                */
+               if (ext4_bg_has_super(sb, group)) {
+                       err = sb_issue_zeroout(sb, gdblocks + start + 1,
+                                       reserved_gdb, GFP_NOFS);
+                       if (err)
+                               goto out;
                }
-               brelse(gdb);
-       }
 
-       /* Zero out all of the reserved backup group descriptor table blocks */
-       ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
-                       block, sbi->s_itb_per_group);
-       err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
-                              GFP_NOFS);
-       if (err)
-               goto exit_journal;
+               /* Initialize group tables of the group @group */
+               if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
+                       goto handle_bb;
 
-       err = extend_or_restart_transaction(handle, 2);
-       if (err)
-               goto exit_journal;
+               /* Zero out all of the inode table blocks */
+               block = group_data[i].inode_table;
+               ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
+                          block, sbi->s_itb_per_group);
+               err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
+                                      GFP_NOFS);
+               if (err)
+                       goto out;
 
-       bh = bclean(handle, sb, input->block_bitmap);
-       if (IS_ERR(bh)) {
-               err = PTR_ERR(bh);
-               goto exit_journal;
-       }
+handle_bb:
+               if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
+                       goto handle_ib;
 
-       if (ext4_bg_has_super(sb, input->group)) {
-               ext4_debug("mark backup group tables %#04llx (+0)\n", start);
-               ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
-       }
+               /* Initialize block bitmap of the @group */
+               block = group_data[i].block_bitmap;
+               err = extend_or_restart_transaction(handle, 1);
+               if (err)
+                       goto out;
 
-       ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
-                  input->block_bitmap - start);
-       ext4_set_bit(input->block_bitmap - start, bh->b_data);
-       ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input->inode_bitmap,
-                  input->inode_bitmap - start);
-       ext4_set_bit(input->inode_bitmap - start, bh->b_data);
-
-       /* Zero out all of the inode table blocks */
-       block = input->inode_table;
-       ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
-                       block, sbi->s_itb_per_group);
-       err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
-       if (err)
-               goto exit_bh;
-       ext4_set_bits(bh->b_data, input->inode_table - start,
-                     sbi->s_itb_per_group);
+               bh = bclean(handle, sb, block);
+               if (IS_ERR(bh)) {
+                       err = PTR_ERR(bh);
+                       goto out;
+               }
+               if (ext4_bg_has_super(sb, group)) {
+                       ext4_debug("mark backup superblock %#04llx (+0)\n",
+                                  start);
+                       ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb +
+                                                    1);
+               }
+               ext4_mark_bitmap_end(group_data[i].blocks_count,
+                                    sb->s_blocksize * 8, bh->b_data);
+               err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               if (err)
+                       goto out;
+               brelse(bh);
 
+handle_ib:
+               if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
+                       continue;
 
-       ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
-                            bh->b_data);
-       err = ext4_handle_dirty_metadata(handle, NULL, bh);
-       if (unlikely(err)) {
-               ext4_std_error(sb, err);
-               goto exit_bh;
+               /* Initialize inode bitmap of the @group */
+               block = group_data[i].inode_bitmap;
+               err = extend_or_restart_transaction(handle, 1);
+               if (err)
+                       goto out;
+               /* Mark unused entries in inode bitmap used */
+               bh = bclean(handle, sb, block);
+               if (IS_ERR(bh)) {
+                       err = PTR_ERR(bh);
+                       goto out;
+               }
+
+               ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+                                    sb->s_blocksize * 8, bh->b_data);
+               err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               if (err)
+                       goto out;
+               brelse(bh);
        }
-       brelse(bh);
-       /* Mark unused entries in inode bitmap used */
-       ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
-                  input->inode_bitmap, input->inode_bitmap - start);
-       if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
-               err = PTR_ERR(bh);
-               goto exit_journal;
+       bh = NULL;
+
+       /* Mark group tables in block bitmap */
+       for (j = 0; j < GROUP_TABLE_COUNT; j++) {
+               count = group_table_count[j];
+               start = (&group_data[0].block_bitmap)[j];
+               block = start;
+               for (i = 1; i < flex_gd->count; i++) {
+                       block += group_table_count[j];
+                       if (block == (&group_data[i].block_bitmap)[j]) {
+                               count += group_table_count[j];
+                               continue;
+                       }
+                       err = set_flexbg_block_bitmap(sb, handle,
+                                               flex_gd, start, count);
+                       if (err)
+                               goto out;
+                       count = group_table_count[j];
+                       start = group_data[i].block_bitmap;
+                       block = start;
+               }
+
+               if (count) {
+                       err = set_flexbg_block_bitmap(sb, handle,
+                                               flex_gd, start, count);
+                       if (err)
+                               goto out;
+               }
        }
 
-       ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
-                            bh->b_data);
-       err = ext4_handle_dirty_metadata(handle, NULL, bh);
-       if (unlikely(err))
-               ext4_std_error(sb, err);
-exit_bh:
+out:
        brelse(bh);
-
-exit_journal:
-       if ((err2 = ext4_journal_stop(handle)) && !err)
+       err2 = ext4_journal_stop(handle);
+       if (err2 && !err)
                err = err2;
 
        return err;
@@ -351,10 +636,10 @@ static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
  * groups in current filesystem that have BACKUPS, or -ve error code.
  */
 static int verify_reserved_gdb(struct super_block *sb,
+                              ext4_group_t end,
                               struct buffer_head *primary)
 {
        const ext4_fsblk_t blk = primary->b_blocknr;
-       const ext4_group_t end = EXT4_SB(sb)->s_groups_count;
        unsigned three = 1;
        unsigned five = 5;
        unsigned seven = 7;
@@ -429,7 +714,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        if (!gdb_bh)
                return -EIO;
 
-       gdbackups = verify_reserved_gdb(sb, gdb_bh);
+       gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
        if (gdbackups < 0) {
                err = gdbackups;
                goto exit_bh;
@@ -592,7 +877,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
                        err = -EIO;
                        goto exit_bh;
                }
-               if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
+               gdbackups = verify_reserved_gdb(sb, group, primary[res]);
+               if (gdbackups < 0) {
                        brelse(primary[res]);
                        err = gdbackups;
                        goto exit_bh;
@@ -735,6 +1021,348 @@ exit_err:
        }
 }
 
+/*
+ * ext4_add_new_descs() adds @count group descriptor of groups
+ * starting at @group
+ *
+ * @handle: journal handle
+ * @sb: super block
+ * @group: the group no. of the first group desc to be added
+ * @resize_inode: the resize inode
+ * @count: number of group descriptors to be added
+ */
+static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
+                             ext4_group_t group, struct inode *resize_inode,
+                             ext4_group_t count)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       struct buffer_head *gdb_bh;
+       int i, gdb_off, gdb_num, err = 0;
+
+       for (i = 0; i < count; i++, group++) {
+               int reserved_gdb = ext4_bg_has_super(sb, group) ?
+                       le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
+
+               gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
+               gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+
+               /*
+                * We will only either add reserved group blocks to a backup group
+                * or remove reserved blocks for the first group in a new group block.
+                * Doing both would mean more complex code, and sane people don't
+                * use non-sparse filesystems anymore.  This is already checked above.
+                */
+               if (gdb_off) {
+                       gdb_bh = sbi->s_group_desc[gdb_num];
+                       err = ext4_journal_get_write_access(handle, gdb_bh);
+
+                       if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
+                               err = reserve_backup_gdb(handle, resize_inode, group);
+               } else
+                       err = add_new_gdb(handle, resize_inode, group);
+               if (err)
+                       break;
+       }
+       return err;
+}
+
+/*
+ * ext4_setup_new_descs() will set up the group descriptors of a flex bg
+ */
+static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+                               struct ext4_new_flex_group_data *flex_gd)
+{
+       struct ext4_new_group_data      *group_data = flex_gd->groups;
+       struct ext4_group_desc          *gdp;
+       struct ext4_sb_info             *sbi = EXT4_SB(sb);
+       struct buffer_head              *gdb_bh;
+       ext4_group_t                    group;
+       __u16                           *bg_flags = flex_gd->bg_flags;
+       int                             i, gdb_off, gdb_num, err = 0;
+       
+
+       for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
+               group = group_data->group;
+
+               gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
+               gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+
+               /*
+                * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
+                */
+               gdb_bh = sbi->s_group_desc[gdb_num];
+               /* Update group descriptor block for new group */
+               gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data +
+                                                gdb_off * EXT4_DESC_SIZE(sb));
+
+               memset(gdp, 0, EXT4_DESC_SIZE(sb));
+               ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
+               ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+               ext4_inode_table_set(sb, gdp, group_data->inode_table);
+               ext4_free_group_clusters_set(sb, gdp,
+                                            EXT4_B2C(sbi, group_data->free_blocks_count));
+               ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
+               gdp->bg_flags = cpu_to_le16(*bg_flags);
+               gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+
+               err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
+               if (unlikely(err)) {
+                       ext4_std_error(sb, err);
+                       break;
+               }
+
+               /*
+                * We can allocate memory for mb_alloc based on the new group
+                * descriptor
+                */
+               err = ext4_mb_add_groupinfo(sb, group, gdp);
+               if (err)
+                       break;
+       }
+       return err;
+}
+
+/*
+ * ext4_update_super() updates the super block so that the newly added
+ * groups can be seen by the filesystem.
+ *
+ * @sb: super block
+ * @flex_gd: new added groups
+ */
+static void ext4_update_super(struct super_block *sb,
+                            struct ext4_new_flex_group_data *flex_gd)
+{
+       ext4_fsblk_t blocks_count = 0;
+       ext4_fsblk_t free_blocks = 0;
+       ext4_fsblk_t reserved_blocks = 0;
+       struct ext4_new_group_data *group_data = flex_gd->groups;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       int i;
+
+       BUG_ON(flex_gd->count == 0 || group_data == NULL);
+       /*
+        * Make the new blocks and inodes valid next.  We do this before
+        * increasing the group count so that once the group is enabled,
+        * all of its blocks and inodes are already valid.
+        *
+        * We always allocate group-by-group, then block-by-block or
+        * inode-by-inode within a group, so enabling these
+        * blocks/inodes before the group is live won't actually let us
+        * allocate the new space yet.
+        */
+       for (i = 0; i < flex_gd->count; i++) {
+               blocks_count += group_data[i].blocks_count;
+               free_blocks += group_data[i].free_blocks_count;
+       }
+
+       reserved_blocks = ext4_r_blocks_count(es) * 100;
+       do_div(reserved_blocks, ext4_blocks_count(es));
+       reserved_blocks *= blocks_count;
+       do_div(reserved_blocks, 100);
+
+       ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
+       le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
+                    flex_gd->count);
+
+       /*
+        * We need to protect s_groups_count against other CPUs seeing
+        * inconsistent state in the superblock.
+        *
+        * The precise rules we use are:
+        *
+        * * Writers must perform a smp_wmb() after updating all
+        *   dependent data and before modifying the groups count
+        *
+        * * Readers must perform an smp_rmb() after reading the groups
+        *   count and before reading any dependent data.
+        *
+        * NB. These rules can be relaxed when checking the group count
+        * while freeing data, as we can only allocate from a block
+        * group after serialising against the group count, and we can
+        * only then free after serialising in turn against that
+        * allocation.
+        */
+       smp_wmb();
+
+       /* Update the global fs size fields */
+       sbi->s_groups_count += flex_gd->count;
+
+       /* Update the reserved block counts only once the new group is
+        * active. */
+       ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
+                               reserved_blocks);
+
+       /* Update the free space counts */
+       percpu_counter_add(&sbi->s_freeclusters_counter,
+                          EXT4_B2C(sbi, free_blocks));
+       percpu_counter_add(&sbi->s_freeinodes_counter,
+                          EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
+
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                     EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+           sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group;
+               flex_group = ext4_flex_group(sbi, group_data[0].group);
+               atomic_add(EXT4_B2C(sbi, free_blocks),
+                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+                          &sbi->s_flex_groups[flex_group].free_inodes);
+       }
+
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: added group %u:"
+                      "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
+                      blocks_count, free_blocks, reserved_blocks);
+}
+
+/* Add a flex group to an fs. Ensure we handle all possible error conditions
+ * _before_ we start modifying the filesystem, because we cannot abort the
+ * transaction and not have it write the data to disk.
+ */
+static int ext4_flex_group_add(struct super_block *sb,
+                              struct inode *resize_inode,
+                              struct ext4_new_flex_group_data *flex_gd)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       ext4_fsblk_t o_blocks_count;
+       ext4_grpblk_t last;
+       ext4_group_t group;
+       handle_t *handle;
+       unsigned reserved_gdb;
+       int err = 0, err2 = 0, credit;
+
+       BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
+
+       reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
+       o_blocks_count = ext4_blocks_count(es);
+       ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
+       BUG_ON(last);
+
+       err = setup_new_flex_group_blocks(sb, flex_gd);
+       if (err)
+               goto exit;
+       /*
+        * We will always be modifying at least the superblock and  GDT
+        * block.  If we are adding a group past the last current GDT block,
+        * we will also modify the inode and the dindirect block.  If we
+        * are adding a group with superblock/GDT backups  we will also
+        * modify each of the reserved GDT dindirect blocks.
+        */
+       credit = flex_gd->count * 4 + reserved_gdb;
+       handle = ext4_journal_start_sb(sb, credit);
+       if (IS_ERR(handle)) {
+               err = PTR_ERR(handle);
+               goto exit;
+       }
+
+       err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+       if (err)
+               goto exit_journal;
+
+       group = flex_gd->groups[0].group;
+       BUG_ON(group != EXT4_SB(sb)->s_groups_count);
+       err = ext4_add_new_descs(handle, sb, group,
+                               resize_inode, flex_gd->count);
+       if (err)
+               goto exit_journal;
+
+       err = ext4_setup_new_descs(handle, sb, flex_gd);
+       if (err)
+               goto exit_journal;
+
+       ext4_update_super(sb, flex_gd);
+
+       err = ext4_handle_dirty_super(handle, sb);
+
+exit_journal:
+       err2 = ext4_journal_stop(handle);
+       if (!err)
+               err = err2;
+
+       if (!err) {
+               int i;
+               update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+                              sizeof(struct ext4_super_block));
+               for (i = 0; i < flex_gd->count; i++, group++) {
+                       struct buffer_head *gdb_bh;
+                       int gdb_num;
+                       gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
+                       gdb_bh = sbi->s_group_desc[gdb_num];
+                       update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+                                      gdb_bh->b_size);
+               }
+       }
+exit:
+       return err;
+}
+
+static int ext4_setup_next_flex_gd(struct super_block *sb,
+                                   struct ext4_new_flex_group_data *flex_gd,
+                                   ext4_fsblk_t n_blocks_count,
+                                   unsigned long flexbg_size)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct ext4_new_group_data *group_data = flex_gd->groups;
+       ext4_fsblk_t o_blocks_count;
+       ext4_group_t n_group;
+       ext4_group_t group;
+       ext4_group_t last_group;
+       ext4_grpblk_t last;
+       ext4_grpblk_t blocks_per_group;
+       unsigned long i;
+
+       blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
+
+       o_blocks_count = ext4_blocks_count(es);
+
+       if (o_blocks_count == n_blocks_count)
+               return 0;
+
+       ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
+       BUG_ON(last);
+       ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
+
+       last_group = group | (flexbg_size - 1);
+       if (last_group > n_group)
+               last_group = n_group;
+
+       flex_gd->count = last_group - group + 1;
+
+       for (i = 0; i < flex_gd->count; i++) {
+               int overhead;
+
+               group_data[i].group = group + i;
+               group_data[i].blocks_count = blocks_per_group;
+               overhead = ext4_bg_has_super(sb, group + i) ?
+                          (1 + ext4_bg_num_gdb(sb, group + i) +
+                           le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+               group_data[i].free_blocks_count = blocks_per_group - overhead;
+               if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                              EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+                       flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
+                                              EXT4_BG_INODE_UNINIT;
+               else
+                       flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
+       }
+
+       if (last_group == n_group &&
+           EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+               /* We need to initialize block bitmap of last group. */
+               flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
+
+       if ((last_group == n_group) && (last != blocks_per_group - 1)) {
+               group_data[i - 1].blocks_count = last + 1;
+               group_data[i - 1].free_blocks_count -= blocks_per_group-
+                                       last - 1;
+       }
+
+       return 1;
+}
+
 /* Add group descriptor data to an existing or new group descriptor block.
  * Ensure we handle all possible error conditions _before_ we start modifying
  * the filesystem, because we cannot abort the transaction and not have it
@@ -750,16 +1378,15 @@ exit_err:
  */
 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 {
+       struct ext4_new_flex_group_data flex_gd;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
                le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
-       struct buffer_head *primary = NULL;
-       struct ext4_group_desc *gdp;
        struct inode *inode = NULL;
-       handle_t *handle;
        int gdb_off, gdb_num;
-       int err, err2;
+       int err;
+       __u16 bg_flags = 0;
 
        gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
        gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
@@ -798,175 +1425,69 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
        }
 
 
-       if ((err = verify_group_input(sb, input)))
-               goto exit_put;
+       err = verify_group_input(sb, input);
+       if (err)
+               goto out;
 
-       if ((err = setup_new_group_blocks(sb, input)))
-               goto exit_put;
+       flex_gd.count = 1;
+       flex_gd.groups = input;
+       flex_gd.bg_flags = &bg_flags;
+       err = ext4_flex_group_add(sb, inode, &flex_gd);
+out:
+       iput(inode);
+       return err;
+} /* ext4_group_add */
 
-       /*
-        * We will always be modifying at least the superblock and a GDT
-        * block.  If we are adding a group past the last current GDT block,
-        * we will also modify the inode and the dindirect block.  If we
-        * are adding a group with superblock/GDT backups  we will also
-        * modify each of the reserved GDT dindirect blocks.
+/*
+ * extend a group without checking assuming that checking has been done.
+ */
+static int ext4_group_extend_no_check(struct super_block *sb,
+                                     ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       handle_t *handle;
+       int err = 0, err2;
+
+       /* We will update the superblock, one block bitmap, and
+        * one group descriptor via ext4_group_add_blocks().
         */
-       handle = ext4_journal_start_sb(sb,
-                                      ext4_bg_has_super(sb, input->group) ?
-                                      3 + reserved_gdb : 4);
+       handle = ext4_journal_start_sb(sb, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
-               goto exit_put;
+               ext4_warning(sb, "error %d on journal start", err);
+               return err;
        }
 
-       if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
-               goto exit_journal;
-
-        /*
-         * We will only either add reserved group blocks to a backup group
-         * or remove reserved blocks for the first group in a new group block.
-         * Doing both would be mean more complex code, and sane people don't
-         * use non-sparse filesystems anymore.  This is already checked above.
-         */
-       if (gdb_off) {
-               primary = sbi->s_group_desc[gdb_num];
-               if ((err = ext4_journal_get_write_access(handle, primary)))
-                       goto exit_journal;
-
-               if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
-                       err = reserve_backup_gdb(handle, inode, input->group);
-                       if (err)
-                               goto exit_journal;
-               }
-       } else {
-               /*
-                * Note that we can access new group descriptor block safely
-                * only if add_new_gdb() succeeds.
-                */
-               err = add_new_gdb(handle, inode, input->group);
-               if (err)
-                       goto exit_journal;
-               primary = sbi->s_group_desc[gdb_num];
+       err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+       if (err) {
+               ext4_warning(sb, "error %d on journal write access", err);
+               goto errout;
        }
 
-        /*
-         * OK, now we've set up the new group.  Time to make it active.
-         *
-         * so we have to be safe wrt. concurrent accesses the group
-         * data.  So we need to be careful to set all of the relevant
-         * group descriptor data etc. *before* we enable the group.
-         *
-         * The key field here is sbi->s_groups_count: as long as
-         * that retains its old value, nobody is going to access the new
-         * group.
-         *
-         * So first we update all the descriptor metadata for the new
-         * group; then we update the total disk blocks count; then we
-         * update the groups count to enable the group; then finally we
-         * update the free space counts so that the system can start
-         * using the new disk blocks.
-         */
-
-       /* Update group descriptor block for new group */
-       gdp = (struct ext4_group_desc *)((char *)primary->b_data +
-                                        gdb_off * EXT4_DESC_SIZE(sb));
-
-       memset(gdp, 0, EXT4_DESC_SIZE(sb));
-       ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
-       ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
-       ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
-       ext4_free_group_clusters_set(sb, gdp, input->free_blocks_count);
-       ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
-       gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
-
-       /*
-        * We can allocate memory for mb_alloc based on the new group
-        * descriptor
-        */
-       err = ext4_mb_add_groupinfo(sb, input->group, gdp);
+       ext4_blocks_count_set(es, o_blocks_count + add);
+       ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+       /* We add the blocks to the bitmap and set the group need init bit */
+       err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
        if (err)
-               goto exit_journal;
-
-       /*
-        * Make the new blocks and inodes valid next.  We do this before
-        * increasing the group count so that once the group is enabled,
-        * all of its blocks and inodes are already valid.
-        *
-        * We always allocate group-by-group, then block-by-block or
-        * inode-by-inode within a group, so enabling these
-        * blocks/inodes before the group is live won't actually let us
-        * allocate the new space yet.
-        */
-       ext4_blocks_count_set(es, ext4_blocks_count(es) +
-               input->blocks_count);
-       le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
-
-       /*
-        * We need to protect s_groups_count against other CPUs seeing
-        * inconsistent state in the superblock.
-        *
-        * The precise rules we use are:
-        *
-        * * Writers must perform a smp_wmb() after updating all dependent
-        *   data and before modifying the groups count
-        *
-        * * Readers must perform an smp_rmb() after reading the groups count
-        *   and before reading any dependent data.
-        *
-        * NB. These rules can be relaxed when checking the group count
-        * while freeing data, as we can only allocate from a block
-        * group after serialising against the group count, and we can
-        * only then free after serialising in turn against that
-        * allocation.
-        */
-       smp_wmb();
-
-       /* Update the global fs size fields */
-       sbi->s_groups_count++;
-
-       err = ext4_handle_dirty_metadata(handle, NULL, primary);
-       if (unlikely(err)) {
-               ext4_std_error(sb, err);
-               goto exit_journal;
-       }
-
-       /* Update the reserved block counts only once the new group is
-        * active. */
-       ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
-               input->reserved_blocks);
-
-       /* Update the free space counts */
-       percpu_counter_add(&sbi->s_freeclusters_counter,
-                          EXT4_B2C(sbi, input->free_blocks_count));
-       percpu_counter_add(&sbi->s_freeinodes_counter,
-                          EXT4_INODES_PER_GROUP(sb));
-
-       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
-           sbi->s_log_groups_per_flex) {
-               ext4_group_t flex_group;
-               flex_group = ext4_flex_group(sbi, input->group);
-               atomic_add(EXT4_B2C(sbi, input->free_blocks_count),
-                          &sbi->s_flex_groups[flex_group].free_clusters);
-               atomic_add(EXT4_INODES_PER_GROUP(sb),
-                          &sbi->s_flex_groups[flex_group].free_inodes);
-       }
-
+               goto errout;
        ext4_handle_dirty_super(handle, sb);
-
-exit_journal:
-       if ((err2 = ext4_journal_stop(handle)) && !err)
+       ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+errout:
+       err2 = ext4_journal_stop(handle);
+       if (err2 && !err)
                err = err2;
-       if (!err && primary) {
-               update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+
+       if (!err) {
+               if (test_opt(sb, DEBUG))
+                       printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
+                              "blocks\n", ext4_blocks_count(es));
+               update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
                               sizeof(struct ext4_super_block));
-               update_backups(sb, primary->b_blocknr, primary->b_data,
-                              primary->b_size);
        }
-exit_put:
-       iput(inode);
        return err;
-} /* ext4_group_add */
+}
 
 /*
  * Extend the filesystem to the new number of blocks specified.  This entry
@@ -985,8 +1506,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        ext4_grpblk_t last;
        ext4_grpblk_t add;
        struct buffer_head *bh;
-       handle_t *handle;
-       int err, err2;
+       int err;
        ext4_group_t group;
 
        o_blocks_count = ext4_blocks_count(es);
@@ -1042,42 +1562,119 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        }
        brelse(bh);
 
-       /* We will update the superblock, one block bitmap, and
-        * one group descriptor via ext4_free_blocks().
-        */
-       handle = ext4_journal_start_sb(sb, 3);
-       if (IS_ERR(handle)) {
-               err = PTR_ERR(handle);
-               ext4_warning(sb, "error %d on journal start", err);
-               goto exit_put;
+       err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+       return err;
+} /* ext4_group_extend */
+
+/*
+ * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
+ *
+ * @sb: super block of the fs to be resized
+ * @n_blocks_count: the number of blocks resides in the resized fs
+ */
+int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+{
+       struct ext4_new_flex_group_data *flex_gd = NULL;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       struct buffer_head *bh;
+       struct inode *resize_inode;
+       ext4_fsblk_t o_blocks_count;
+       ext4_group_t o_group;
+       ext4_group_t n_group;
+       ext4_grpblk_t offset;
+       unsigned long n_desc_blocks;
+       unsigned long o_desc_blocks;
+       unsigned long desc_blocks;
+       int err = 0, flexbg_size = 1;
+
+       o_blocks_count = ext4_blocks_count(es);
+
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: resizing filesystem from %llu "
+                      "upto %llu blocks\n", o_blocks_count, n_blocks_count);
+
+       if (n_blocks_count < o_blocks_count) {
+               /* On-line shrinking not supported */
+               ext4_warning(sb, "can't shrink FS - resize aborted");
+               return -EINVAL;
        }
 
-       if ((err = ext4_journal_get_write_access(handle,
-                                                EXT4_SB(sb)->s_sbh))) {
-               ext4_warning(sb, "error %d on journal write access", err);
-               ext4_journal_stop(handle);
-               goto exit_put;
+       if (n_blocks_count == o_blocks_count)
+               /* Nothing need to do */
+               return 0;
+
+       ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+       ext4_get_group_no_and_offset(sb, o_blocks_count, &o_group, &offset);
+
+       n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
+                       EXT4_DESC_PER_BLOCK(sb);
+       o_desc_blocks = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+                       EXT4_DESC_PER_BLOCK(sb);
+       desc_blocks = n_desc_blocks - o_desc_blocks;
+
+       if (desc_blocks &&
+           (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) ||
+            le16_to_cpu(es->s_reserved_gdt_blocks) < desc_blocks)) {
+               ext4_warning(sb, "No reserved GDT blocks, can't resize");
+               return -EPERM;
        }
-       ext4_blocks_count_set(es, o_blocks_count + add);
-       ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
-                  o_blocks_count + add);
-       /* We add the blocks to the bitmap and set the group need init bit */
-       err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
-       ext4_handle_dirty_super(handle, sb);
-       ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
-                  o_blocks_count + add);
-       err2 = ext4_journal_stop(handle);
-       if (!err && err2)
-               err = err2;
 
-       if (err)
-               goto exit_put;
+       resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+       if (IS_ERR(resize_inode)) {
+               ext4_warning(sb, "Error opening resize inode");
+               return PTR_ERR(resize_inode);
+       }
 
+       /* See if the device is actually as big as what was requested */
+       bh = sb_bread(sb, n_blocks_count - 1);
+       if (!bh) {
+               ext4_warning(sb, "can't read last block, resize aborted");
+               return -ENOSPC;
+       }
+       brelse(bh);
+
+       if (offset != 0) {
+               /* extend the last group */
+               ext4_grpblk_t add;
+               add = EXT4_BLOCKS_PER_GROUP(sb) - offset;
+               err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+               if (err)
+                       goto out;
+       }
+
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+           es->s_log_groups_per_flex)
+               flexbg_size = 1 << es->s_log_groups_per_flex;
+
+       o_blocks_count = ext4_blocks_count(es);
+       if (o_blocks_count == n_blocks_count)
+               goto out;
+
+       flex_gd = alloc_flex_gd(flexbg_size);
+       if (flex_gd == NULL) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* Add flex groups. Note that a regular group is a
+        * flex group with 1 group.
+        */
+       while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
+                                             flexbg_size)) {
+               ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
+               err = ext4_flex_group_add(sb, resize_inode, flex_gd);
+               if (unlikely(err))
+                       break;
+       }
+
+out:
+       if (flex_gd)
+               free_flex_gd(flex_gd);
+
+       iput(resize_inode);
        if (test_opt(sb, DEBUG))
-               printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
-                      ext4_blocks_count(es));
-       update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
-                      sizeof(struct ext4_super_block));
-exit_put:
+               printk(KERN_DEBUG "EXT4-fs: resized filesystem from %llu "
+                      "upto %llu blocks\n", o_blocks_count, n_blocks_count);
        return err;
-} /* ext4_group_extend */
+}
index ed3ce82e2de4ce8ef303075028db8651a1571e17..502c61fd739262306f7665b355cbdeba4d1e1c43 100644 (file)
@@ -1095,7 +1095,7 @@ static int ext4_show_options(struct seq_file *seq, struct dentry *root)
        }
        if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
                seq_printf(seq, ",max_batch_time=%u",
-                          (unsigned) sbi->s_min_batch_time);
+                          (unsigned) sbi->s_max_batch_time);
        }
 
        /*
@@ -2005,17 +2005,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
        struct ext4_group_desc *gdp = NULL;
        ext4_group_t flex_group_count;
        ext4_group_t flex_group;
-       int groups_per_flex = 0;
+       unsigned int groups_per_flex = 0;
        size_t size;
        int i;
 
        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
-       groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
-       if (groups_per_flex < 2) {
+       if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
                sbi->s_log_groups_per_flex = 0;
                return 1;
        }
+       groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
        /* We allocate both existing and potentially added groups */
        flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
@@ -3506,7 +3505,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         * of the filesystem.
         */
        if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
-                ext4_msg(sb, KERN_WARNING, "bad geometry: first data"
+               ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
                         "block %u is beyond end of filesystem (%llu)",
                         le32_to_cpu(es->s_first_data_block),
                         ext4_blocks_count(es));
index b60f9f81e33c4371985d83621020ed69f88de779..d2a200624af59a61ef29b88e4320609ddf2c02d0 100644 (file)
@@ -47,8 +47,9 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
                              name, value, size, flags);
 }
 
-int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
-                   void *fs_info)
+static int
+ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+               void *fs_info)
 {
        const struct xattr *xattr;
        handle_t *handle = fs_info;
index e2951506434de6dbeb6bd9e0288b50d0d4977ac9..f855916657ba910f676eeb72ad795317c62fcbd5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
 #include "internal.h"
 
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES    (4096UL >> (PAGE_CACHE_SHIFT - 10))
+
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
@@ -742,11 +748,17 @@ static long wb_writeback(struct bdi_writeback *wb,
                if (work->for_background && !over_bground_thresh(wb->bdi))
                        break;
 
+               /*
+                * Kupdate and background works are special and we want to
+                * include all inodes that need writing. Livelock avoidance is
+                * handled by these works yielding to any other work so we are
+                * safe.
+                */
                if (work->for_kupdate) {
                        oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
-                       work->older_than_this = &oldest_jif;
-               }
+               } else if (work->for_background)
+                       oldest_jif = jiffies;
 
                trace_writeback_start(wb->bdi, work);
                if (list_empty(&wb->b_io))
index 98c80d8c2a62df79273433cb494a3f05d35ae02c..ce85b62bc0a27f4ad0e06a0714c93a8d4a9f634c 100644 (file)
@@ -195,10 +195,10 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *fsname)
                return -EINVAL;
        }
 
-       error = dlm_new_lockspace(fsname, strlen(fsname), &ls->ls_dlm,
+       error = dlm_new_lockspace(fsname, NULL, 
                                  DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
                                  (ls->ls_nodir ? DLM_LSFL_NODIR : 0),
-                                 GDLM_LVB_SIZE);
+                                 GDLM_LVB_SIZE, NULL, NULL, NULL, &ls->ls_dlm);
        if (error)
                printk(KERN_ERR "dlm_new_lockspace error %d", error);
 
index 87535753ab04a70b4556b58d4f1443774305da7a..4fa4f0916af9047ef57d8db43315244d92fef427 100644 (file)
@@ -776,6 +776,8 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
        else
                __count_vm_events(PGINODESTEAL, reap);
        spin_unlock(&sb->s_inode_lru_lock);
+       if (current->reclaim_state)
+               current->reclaim_state->reclaimed_slab += reap;
 
        dispose_list(&freeable);
 }
index 68d704db787f108350f9cc47fd506628685399c7..5069b84751509e65bb9689c153ac87e36c26b843 100644 (file)
@@ -429,6 +429,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
        jbd_debug(3, "JBD2: commit phase 1\n");
 
+       /*
+        * Clear revoked flag to reflect there is no revoked buffers
+        * in the next transaction which is going to be started.
+        */
+       jbd2_clear_buffer_revoked_flags(journal);
+
        /*
         * Switch to a new revoke table.
         */
index 69fd93588118027961923d26685b8edecfe3c268..30b2867d6cc950cb7eeceeb528f18d073405117b 100644 (file)
  *   overwriting the new data.  We don't even need to clear the revoke
  *   bit here.
  *
+ * We cache revoke status of a buffer in the current transaction in b_states
+ * bits.  As the name says, revokevalid flag indicates that the cached revoke
+ * status of a buffer is valid and we can rely on the cached status.
+ *
  * Revoke information on buffers is a tri-state value:
  *
  * RevokeValid clear:  no cached revoke status, need to look it up
@@ -478,6 +482,36 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
        return did_revoke;
 }
 
+/*
+ * journal_clear_revoked_flag clears revoked flag of buffers in
+ * revoke table to reflect there is no revoked buffers in the next
+ * transaction which is going to be started.
+ */
+void jbd2_clear_buffer_revoked_flags(journal_t *journal)
+{
+       struct jbd2_revoke_table_s *revoke = journal->j_revoke;
+       int i = 0;
+
+       for (i = 0; i < revoke->hash_size; i++) {
+               struct list_head *hash_list;
+               struct list_head *list_entry;
+               hash_list = &revoke->hash_table[i];
+
+               list_for_each(list_entry, hash_list) {
+                       struct jbd2_revoke_record_s *record;
+                       struct buffer_head *bh;
+                       record = (struct jbd2_revoke_record_s *)list_entry;
+                       bh = __find_get_block(journal->j_fs_dev,
+                                             record->blocknr,
+                                             journal->j_blocksize);
+                       if (bh) {
+                               clear_buffer_revoked(bh);
+                               __brelse(bh);
+                       }
+               }
+       }
+}
+
 /* journal_switch_revoke table select j_revoke for next transaction
  * we do not want to suspend any processing until all revokes are
  * written -bzzz
index a0e41a4c080e9b2d3a51f5b4c6404cb8b8bc9c2b..35ae096bed5dca819181c1d7bfa94194fa3b0c01 100644 (file)
@@ -517,12 +517,13 @@ void jbd2_journal_lock_updates(journal_t *journal)
                        break;
 
                spin_lock(&transaction->t_handle_lock);
+               prepare_to_wait(&journal->j_wait_updates, &wait,
+                               TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
+                       finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
-               prepare_to_wait(&journal->j_wait_updates, &wait,
-                               TASK_UNINTERRUPTIBLE);
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
index e513f1913c1522a8e9e8dbc4f501a4804507d182..a01cdad6aad1810f0323d6494865f7540d50421c 100644 (file)
@@ -74,7 +74,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
        ((struct erase_priv_struct *)instr->priv)->jeb = jeb;
        ((struct erase_priv_struct *)instr->priv)->c = c;
 
-       ret = c->mtd->erase(c->mtd, instr);
+       ret = mtd_erase(c->mtd, instr);
        if (!ret)
                return;
 
@@ -336,12 +336,11 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
        uint32_t ofs;
        size_t retlen;
        int ret = -EIO;
+       unsigned long *wordebuf;
 
-       if (c->mtd->point) {
-               unsigned long *wordebuf;
-
-               ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size,
-                                   &retlen, &ebuf, NULL);
+       ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
+                       &ebuf, NULL);
+       if (ret != -EOPNOTSUPP) {
                if (ret) {
                        D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
                        goto do_flash_read;
@@ -349,7 +348,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
                if (retlen < c->sector_size) {
                        /* Don't muck about if it won't let us point to the whole erase sector */
                        D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
-                       c->mtd->unpoint(c->mtd, jeb->offset, retlen);
+                       mtd_unpoint(c->mtd, jeb->offset, retlen);
                        goto do_flash_read;
                }
                wordebuf = ebuf-sizeof(*wordebuf);
@@ -358,7 +357,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
                   if (*++wordebuf != ~0)
                           break;
                } while(--retlen);
-               c->mtd->unpoint(c->mtd, jeb->offset, c->sector_size);
+               mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
                if (retlen) {
                        printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
                               *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
@@ -381,7 +380,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
 
                *bad_offset = ofs;
 
-               ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
+               ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
                if (ret) {
                        printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
                        ret = -EIO;
index 4b8afe39a87f2807859b9eda971b08ddc27dae7e..2e0123867cb1b67b1a6a88d9f03439af1da4c204 100644 (file)
@@ -466,7 +466,6 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
 
        if (insert_inode_locked(inode) < 0) {
                make_bad_inode(inode);
-               unlock_new_inode(inode);
                iput(inode);
                return ERR_PTR(-EINVAL);
        }
index ee57bac1ba6d07696053a3172457696311a88899..3093ac4fb24c2966c39b4987040dc9b5a3c33855 100644 (file)
@@ -62,17 +62,15 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
 #ifndef __ECOS
        /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
         * adding and jffs2_flash_read_end() interface. */
-       if (c->mtd->point) {
-               err = c->mtd->point(c->mtd, ofs, len, &retlen,
-                                   (void **)&buffer, NULL);
-               if (!err && retlen < len) {
-                       JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
-                       c->mtd->unpoint(c->mtd, ofs, retlen);
-               } else if (err)
+       err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
+       if (!err && retlen < len) {
+               JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
+               mtd_unpoint(c->mtd, ofs, retlen);
+       } else if (err) {
+               if (err != -EOPNOTSUPP)
                        JFFS2_WARNING("MTD point failed: error code %d.\n", err);
-               else
-                       pointed = 1; /* succefully pointed to device */
-       }
+       } else
+               pointed = 1; /* succefully pointed to device */
 #endif
 
        if (!pointed) {
@@ -101,7 +99,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
                kfree(buffer);
 #ifndef __ECOS
        else
-               c->mtd->unpoint(c->mtd, ofs, len);
+               mtd_unpoint(c->mtd, ofs, len);
 #endif
 
        if (crc != tn->data_crc) {
@@ -137,7 +135,7 @@ free_out:
                kfree(buffer);
 #ifndef __ECOS
        else
-               c->mtd->unpoint(c->mtd, ofs, len);
+               mtd_unpoint(c->mtd, ofs, len);
 #endif
        return err;
 }
index 28107ca136e481a556f686376292bf2244a78a91..f99464833bb2fb6e1ce886a713706b0bd88413ff 100644 (file)
@@ -97,15 +97,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
        size_t pointlen, try_size;
 
        if (c->mtd->point) {
-               ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
-                                   (void **)&flashbuf, NULL);
+               ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
+                               (void **)&flashbuf, NULL);
                if (!ret && pointlen < c->mtd->size) {
                        /* Don't muck about if it won't let us point to the whole flash */
                        D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
-                       c->mtd->unpoint(c->mtd, 0, pointlen);
+                       mtd_unpoint(c->mtd, 0, pointlen);
                        flashbuf = NULL;
                }
-               if (ret)
+               if (ret && ret != -EOPNOTSUPP)
                        D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
        }
 #endif
@@ -273,7 +273,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
                kfree(flashbuf);
 #ifndef __ECOS
        else
-               c->mtd->unpoint(c->mtd, 0, c->mtd->size);
+               mtd_unpoint(c->mtd, 0, c->mtd->size);
 #endif
        kfree(s);
        return ret;
@@ -455,7 +455,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
        if (jffs2_cleanmarker_oob(c)) {
                int ret;
 
-               if (c->mtd->block_isbad(c->mtd, jeb->offset))
+               if (mtd_block_isbad(c->mtd, jeb->offset))
                        return BLK_STATE_BADBLOCK;
 
                ret = jffs2_check_nand_cleanmarker(c, jeb);
index 8be4925296cf981aed60190b96eca8fd0bf6e467..f2d96b5e64f6fa47ee8fd11543156c926779ca0e 100644 (file)
@@ -335,9 +335,7 @@ static void jffs2_put_super (struct super_block *sb)
        jffs2_flash_cleanup(c);
        kfree(c->inocache_list);
        jffs2_clear_xattr_subsystem(c);
-       if (c->mtd->sync)
-               c->mtd->sync(c->mtd);
-
+       mtd_sync(c->mtd);
        D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
 }
 
index b09e51d2f81f98a1a985cdc410802b909bd361f2..30e8f47e8a233f32ad7eaab70dfbf1291bcf409b 100644 (file)
@@ -228,7 +228,7 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
        size_t retlen;
        char *eccstr;
 
-       ret = c->mtd->read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
+       ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
        if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
                printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
                return ret;
@@ -337,7 +337,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
                }
 
                /* Do the read... */
-               ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+               ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
+                              buf);
 
                /* ECC recovered ? */
                if ((ret == -EUCLEAN || ret == -EBADMSG) &&
@@ -413,13 +414,12 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
                if (breakme++ == 20) {
                        printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
                        breakme = 0;
-                       c->mtd->write(c->mtd, ofs, towrite, &retlen,
-                                     brokenbuf);
+                       mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
                        ret = -EIO;
                } else
 #endif
-                       ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
-                                           rewrite_buf);
+                       ret = mtd_write(c->mtd, ofs, towrite, &retlen,
+                                       rewrite_buf);
 
                if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
                        /* Argh. We tried. Really we did. */
@@ -619,13 +619,14 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
        if (breakme++ == 20) {
                printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
                breakme = 0;
-               c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
-                             brokenbuf);
+               mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
+                         brokenbuf);
                ret = -EIO;
        } else
 #endif
 
-               ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
+               ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
+                               &retlen, c->wbuf);
 
        if (ret) {
                printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
@@ -861,8 +862,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
                v += wbuf_retlen;
 
                if (vlen >= c->wbuf_pagesize) {
-                       ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
-                                           &wbuf_retlen, v);
+                       ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
+                                       &wbuf_retlen, v);
                        if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
                                goto outfile;
 
@@ -948,11 +949,11 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
        int     ret;
 
        if (!jffs2_is_writebuffered(c))
-               return c->mtd->read(c->mtd, ofs, len, retlen, buf);
+               return mtd_read(c->mtd, ofs, len, retlen, buf);
 
        /* Read flash */
        down_read(&c->wbuf_sem);
-       ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+       ret = mtd_read(c->mtd, ofs, len, retlen, buf);
 
        if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
                if (ret == -EBADMSG)
@@ -1031,7 +1032,7 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
        ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
        ops.datbuf = NULL;
 
-       ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+       ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
        if (ret || ops.oobretlen != ops.ooblen) {
                printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
                                " bytes, read %zd bytes, error %d\n",
@@ -1074,7 +1075,7 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
        ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
        ops.datbuf = NULL;
 
-       ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+       ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
        if (ret || ops.oobretlen != ops.ooblen) {
                printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
                                " bytes, read %zd bytes, error %d\n",
@@ -1100,7 +1101,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
        ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
        ops.datbuf = NULL;
 
-       ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
+       ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
        if (ret || ops.oobretlen != ops.ooblen) {
                printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
                                " bytes, read %zd bytes, error %d\n",
@@ -1129,11 +1130,8 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
        if( ++jeb->bad_count < MAX_ERASE_FAILURES)
                return 0;
 
-       if (!c->mtd->block_markbad)
-               return 1; // What else can we do?
-
        printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
-       ret = c->mtd->block_markbad(c->mtd, bad_offset);
+       ret = mtd_block_markbad(c->mtd, bad_offset);
 
        if (ret) {
                D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
index b9276b11bac69a8ce08666ccf6e28539e2bf2322..a1bda9dab3f8b67e1306b7695c4461f7010a47bf 100644 (file)
 #include <linux/mtd/mtd.h>
 #include "nodelist.h"
 
-/* This ought to be in core MTD code. All registered MTD devices
-   without writev should have this put in place. Bug the MTD
-   maintainer */
-static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
-                                 unsigned long count, loff_t to, size_t *retlen)
-{
-       unsigned long i;
-       size_t totlen = 0, thislen;
-       int ret = 0;
-
-       for (i=0; i<count; i++) {
-               if (!vecs[i].iov_len)
-                       continue;
-               ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
-               totlen += thislen;
-               if (ret || thislen != vecs[i].iov_len)
-                       break;
-               to += vecs[i].iov_len;
-       }
-       if (retlen)
-               *retlen = totlen;
-       return ret;
-}
-
 int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
                              unsigned long count, loff_t to, size_t *retlen)
 {
@@ -50,18 +26,14 @@ int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
                }
        }
 
-       if (c->mtd->writev)
-               return c->mtd->writev(c->mtd, vecs, count, to, retlen);
-       else {
-               return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
-       }
+       return mtd_writev(c->mtd, vecs, count, to, retlen);
 }
 
 int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
                        size_t *retlen, const u_char *buf)
 {
        int ret;
-       ret = c->mtd->write(c->mtd, ofs, len, retlen, buf);
+       ret = mtd_write(c->mtd, ofs, len, retlen, buf);
 
        if (jffs2_sum_active()) {
                struct kvec vecs[1];
index 339e17e9133d0a87f3f8306062d016ea9d328500..e97404d611e0abf4f627d6c080aea0c0739422f0 100644 (file)
 
 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
 
-static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
+                       void *buf)
 {
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;
        size_t retlen;
        int ret;
 
-       ret = mtd->read(mtd, ofs, len, &retlen, buf);
+       ret = mtd_read(mtd, ofs, len, &retlen, buf);
        BUG_ON(ret == -EINVAL);
        if (ret)
                return ret;
@@ -31,7 +32,8 @@ static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
        return 0;
 }
 
-static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
+                       void *buf)
 {
        struct logfs_super *super = logfs_super(sb);
        struct mtd_info *mtd = super->s_mtd;
@@ -47,7 +49,7 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
        BUG_ON(len > PAGE_CACHE_SIZE);
        page_start = ofs & PAGE_CACHE_MASK;
        page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
-       ret = mtd->write(mtd, ofs, len, &retlen, buf);
+       ret = mtd_write(mtd, ofs, len, &retlen, buf);
        if (ret || (retlen != len))
                return -EIO;
 
@@ -60,14 +62,15 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
  * asynchronous properties.  So just to prevent the first implementor of such
  * a thing from breaking logfs in 2350, we do the usual pointless dance to
  * declare a completion variable and wait for completion before returning
- * from mtd_erase().  What an exercise in futility!
+ * from logfs_mtd_erase().  What an exercise in futility!
  */
 static void logfs_erase_callback(struct erase_info *ei)
 {
        complete((struct completion *)ei->priv);
 }
 
-static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
+static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
+                               size_t len)
 {
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
@@ -84,7 +87,7 @@ static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
        return 0;
 }
 
-static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
+static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
                int ensure_write)
 {
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;
@@ -102,30 +105,29 @@ static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
        ei.len = len;
        ei.callback = logfs_erase_callback;
        ei.priv = (long)&complete;
-       ret = mtd->erase(mtd, &ei);
+       ret = mtd_erase(mtd, &ei);
        if (ret)
                return -EIO;
 
        wait_for_completion(&complete);
        if (ei.state != MTD_ERASE_DONE)
                return -EIO;
-       return mtd_erase_mapping(sb, ofs, len);
+       return logfs_mtd_erase_mapping(sb, ofs, len);
 }
 
-static void mtd_sync(struct super_block *sb)
+static void logfs_mtd_sync(struct super_block *sb)
 {
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;
 
-       if (mtd->sync)
-               mtd->sync(mtd);
+       mtd_sync(mtd);
 }
 
-static int mtd_readpage(void *_sb, struct page *page)
+static int logfs_mtd_readpage(void *_sb, struct page *page)
 {
        struct super_block *sb = _sb;
        int err;
 
-       err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+       err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
                        page_address(page));
        if (err == -EUCLEAN || err == -EBADMSG) {
                /* -EBADMSG happens regularly on power failures */
@@ -143,18 +145,18 @@ static int mtd_readpage(void *_sb, struct page *page)
        return err;
 }
 
-static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
+static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
 {
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
-       filler_t *filler = mtd_readpage;
+       filler_t *filler = logfs_mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;
 
-       if (!mtd->block_isbad)
+       if (!mtd_can_have_bb(mtd))
                return NULL;
 
        *ofs = 0;
-       while (mtd->block_isbad(mtd, *ofs)) {
+       while (mtd_block_isbad(mtd, *ofs)) {
                *ofs += mtd->erasesize;
                if (*ofs >= mtd->size)
                        return NULL;
@@ -163,18 +165,18 @@ static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
        return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
 }
 
-static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
+static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
 {
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
-       filler_t *filler = mtd_readpage;
+       filler_t *filler = logfs_mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;
 
-       if (!mtd->block_isbad)
+       if (!mtd_can_have_bb(mtd))
                return NULL;
 
        *ofs = mtd->size - mtd->erasesize;
-       while (mtd->block_isbad(mtd, *ofs)) {
+       while (mtd_block_isbad(mtd, *ofs)) {
                *ofs -= mtd->erasesize;
                if (*ofs <= 0)
                        return NULL;
@@ -184,7 +186,7 @@ static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
        return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
 }
 
-static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
+static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
 {
        struct logfs_super *super = logfs_super(sb);
@@ -196,8 +198,8 @@ static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
 
-               err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
-                               page_address(page));
+               err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+                                       page_address(page));
                unlock_page(page);
                page_cache_release(page);
                if (err)
@@ -206,7 +208,7 @@ static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
        return 0;
 }
 
-static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
+static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
 {
        struct logfs_super *super = logfs_super(sb);
        int head;
@@ -227,15 +229,15 @@ static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
                len += head;
        }
        len = PAGE_ALIGN(len);
-       __mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
+       __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
 }
 
-static void mtd_put_device(struct logfs_super *s)
+static void logfs_mtd_put_device(struct logfs_super *s)
 {
        put_mtd_device(s->s_mtd);
 }
 
-static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
+static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
 {
        struct logfs_super *super = logfs_super(sb);
        void *buf;
@@ -244,7 +246,7 @@ static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
        buf = kmalloc(super->s_writesize, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
-       err = mtd_read(sb, ofs, super->s_writesize, buf);
+       err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
        if (err)
                goto out;
        if (memchr_inv(buf, 0xff, super->s_writesize))
@@ -255,14 +257,14 @@ out:
 }
 
 static const struct logfs_device_ops mtd_devops = {
-       .find_first_sb  = mtd_find_first_sb,
-       .find_last_sb   = mtd_find_last_sb,
-       .readpage       = mtd_readpage,
-       .writeseg       = mtd_writeseg,
-       .erase          = mtd_erase,
-       .can_write_buf  = mtd_can_write_buf,
-       .sync           = mtd_sync,
-       .put_device     = mtd_put_device,
+       .find_first_sb  = logfs_mtd_find_first_sb,
+       .find_last_sb   = logfs_mtd_find_last_sb,
+       .readpage       = logfs_mtd_readpage,
+       .writeseg       = logfs_mtd_writeseg,
+       .erase          = logfs_mtd_erase,
+       .can_write_buf  = logfs_mtd_can_write_buf,
+       .sync           = logfs_mtd_sync,
+       .put_device     = logfs_mtd_put_device,
 };
 
 int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
index 43926add945b0a4a88625b23062e1cdd393a743d..54cea8ad5a76ff6f8796030c4f42ad7f70d12ca6 100644 (file)
@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
        dprintk("%s enter. slotid %d seqid %d\n",
                __func__, args->csa_slotid, args->csa_sequenceid);
 
-       if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
+       if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
                return htonl(NFS4ERR_BADSLOT);
 
        slot = tbl->slots + args->csa_slotid;
index 873bf00d51a2b8d000ef9d45bdb9f139186ec6e5..277dfaf2e99ac063dee95c4eddbfb8bb9afbfe1e 100644 (file)
@@ -84,7 +84,7 @@ retry:
 /*
  * Turn off NFSv4 uid/gid mapping when using AUTH_SYS
  */
-static int nfs4_disable_idmapping = 0;
+static int nfs4_disable_idmapping = 1;
 
 /*
  * RPC cruft for NFS
@@ -185,7 +185,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
        clp->cl_minorversion = cl_init->minorversion;
        clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
 #endif
-       cred = rpc_lookup_machine_cred();
+       cred = rpc_lookup_machine_cred("*");
        if (!IS_ERR(cred))
                clp->cl_machine_cred = cred;
        nfs_fscache_get_client_cookie(clp);
@@ -250,6 +250,11 @@ static void pnfs_init_server(struct nfs_server *server)
        rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
 }
 
+static void nfs4_destroy_server(struct nfs_server *server)
+{
+       nfs4_purge_state_owners(server);
+}
+
 #else
 static void nfs4_shutdown_client(struct nfs_client *clp)
 {
@@ -1065,6 +1070,7 @@ static struct nfs_server *nfs_alloc_server(void)
        INIT_LIST_HEAD(&server->master_link);
        INIT_LIST_HEAD(&server->delegations);
        INIT_LIST_HEAD(&server->layouts);
+       INIT_LIST_HEAD(&server->state_owners_lru);
 
        atomic_set(&server->active, 0);
 
@@ -1538,6 +1544,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
 
        nfs_server_insert_lists(server);
        server->mount_time = jiffies;
+       server->destroy = nfs4_destroy_server;
 out:
        nfs_free_fattr(fattr);
        return error;
@@ -1719,6 +1726,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
 
        /* Copy data from the source */
        server->nfs_client = source->nfs_client;
+       server->destroy = source->destroy;
        atomic_inc(&server->nfs_client->cl_count);
        nfs_server_copy_userdata(server, source);
 
index 606ef0f20aed58d7e67b33ab2696382ece03c6f5..c43a452f7da2e70c084bddb7dfe6415d194bdf3e 100644 (file)
@@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
                        datasync);
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (ret)
-               return ret;
        mutex_lock(&inode->i_mutex);
 
        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
        status = nfs_commit_inode(inode, FLUSH_SYNC);
+       if (status >= 0 && ret < 0)
+               status = ret;
        have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
        if (have_error)
                ret = xchg(&ctx->error, 0);
index 47d1c6ff2d8e845625e60fbcf3ba5cf65786282e..2c05f1991e1e1c3664c8ffb9f0159687b53b8449 100644 (file)
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/nfs_idmap.h>
+#include <linux/nfs_fs.h>
+
+/**
+ * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
+ * @fattr: fully initialised struct nfs_fattr
+ * @owner_name: owner name string cache
+ * @group_name: group name string cache
+ */
+void nfs_fattr_init_names(struct nfs_fattr *fattr,
+               struct nfs4_string *owner_name,
+               struct nfs4_string *group_name)
+{
+       fattr->owner_name = owner_name;
+       fattr->group_name = group_name;
+}
+
+static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr)
+{
+       fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME;
+       kfree(fattr->owner_name->data);
+}
+
+static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
+{
+       fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME;
+       kfree(fattr->group_name->data);
+}
+
+static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+       struct nfs4_string *owner = fattr->owner_name;
+       __u32 uid;
+
+       if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
+               return false;
+       if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) {
+               fattr->uid = uid;
+               fattr->valid |= NFS_ATTR_FATTR_OWNER;
+       }
+       return true;
+}
+
+static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+       struct nfs4_string *group = fattr->group_name;
+       __u32 gid;
+
+       if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
+               return false;
+       if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) {
+               fattr->gid = gid;
+               fattr->valid |= NFS_ATTR_FATTR_GROUP;
+       }
+       return true;
+}
+
+/**
+ * nfs_fattr_free_names - free up the NFSv4 owner and group strings
+ * @fattr: a fully initialised nfs_fattr structure
+ */
+void nfs_fattr_free_names(struct nfs_fattr *fattr)
+{
+       if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)
+               nfs_fattr_free_owner_name(fattr);
+       if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)
+               nfs_fattr_free_group_name(fattr);
+}
+
+/**
+ * nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free
+ * @server: pointer to the filesystem nfs_server structure
+ * @fattr: a fully initialised nfs_fattr structure
+ *
+ * This helper maps the cached NFSv4 owner/group strings in fattr into
+ * their numeric uid/gid equivalents, and then frees the cached strings.
+ */
+void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+       if (nfs_fattr_map_owner_name(server, fattr))
+               nfs_fattr_free_owner_name(fattr);
+       if (nfs_fattr_map_group_name(server, fattr))
+               nfs_fattr_free_group_name(fattr);
+}
 
 static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
 {
index 81db25e92e108133ed7995510a9ffe1e48e14137..25c3bfad7953645309272c324ba442680546eef3 100644 (file)
@@ -1020,6 +1020,8 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
        fattr->valid = 0;
        fattr->time_start = jiffies;
        fattr->gencount = nfs_inc_attr_generation_counter();
+       fattr->owner_name = NULL;
+       fattr->group_name = NULL;
 }
 
 struct nfs_fattr *nfs_alloc_fattr(void)
index 3f4d95751d52f3e152fc7f2dcb5d1a6d8fc2184b..5ee92538b06317059a7e9b16d449c22c896208bf 100644 (file)
@@ -307,6 +307,8 @@ extern void nfs_readdata_release(struct nfs_read_data *rdata);
 /* write.c */
 extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
                struct list_head *head);
+extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+                                 struct inode *inode, int ioflags);
 extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_writedata_release(struct nfs_write_data *wdata);
 extern void nfs_commit_free(struct nfs_write_data *p);
index 693ae22f873194d0c36adc384d7cb05fefbc8b6c..4d7d0aedc101831ecb3b10cf345f0e18e7ca56ad 100644 (file)
@@ -94,6 +94,8 @@ struct nfs_unique_id {
 struct nfs4_state_owner {
        struct nfs_unique_id so_owner_id;
        struct nfs_server    *so_server;
+       struct list_head     so_lru;
+       unsigned long        so_expires;
        struct rb_node       so_server_node;
 
        struct rpc_cred      *so_cred;   /* Associated cred */
@@ -319,6 +321,7 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
 
 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+extern void nfs4_purge_state_owners(struct nfs_server *);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
index a62d36b9a99ebb912286a8fcf87f425e6b06f203..71ec08617e23820b8e3d11ee8e47a4217ee52e55 100644 (file)
@@ -49,13 +49,14 @@ filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
                            loff_t offset)
 {
        u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
-       u64 tmp;
+       u64 stripe_no;
+       u32 rem;
 
        offset -= flseg->pattern_offset;
-       tmp = offset;
-       do_div(tmp, stripe_width);
+       stripe_no = div_u64(offset, stripe_width);
+       div_u64_rem(offset, flseg->stripe_unit, &rem);
 
-       return tmp * flseg->stripe_unit + do_div(offset, flseg->stripe_unit);
+       return stripe_no * flseg->stripe_unit + rem;
 }
 
 /* This function is used by the layout driver to calculate the
index dcda0ba7af6034b6c8d983082ee00ffbbb4d6cde..75366dc89686d88655c6569f4840a1651ca98690 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/module.h>
+#include <linux/nfs_idmap.h>
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
@@ -364,9 +365,8 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
  * Must be called while holding tbl->slot_tbl_lock
  */
 static void
-nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
 {
-       int free_slotid = free_slot - tbl->slots;
        int slotid = free_slotid;
 
        BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
@@ -431,7 +431,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
        }
 
        spin_lock(&tbl->slot_tbl_lock);
-       nfs4_free_slot(tbl, res->sr_slot);
+       nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
        nfs4_check_drain_fc_complete(res->sr_session);
        spin_unlock(&tbl->slot_tbl_lock);
        res->sr_slot = NULL;
@@ -554,13 +554,10 @@ int nfs41_setup_sequence(struct nfs4_session *session,
        spin_lock(&tbl->slot_tbl_lock);
        if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
            !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
-               /*
-                * The state manager will wait until the slot table is empty.
-                * Schedule the reset thread
-                */
+               /* The state manager will wait until the slot table is empty */
                rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
                spin_unlock(&tbl->slot_tbl_lock);
-               dprintk("%s Schedule Session Reset\n", __func__);
+               dprintk("%s session is draining\n", __func__);
                return -EAGAIN;
        }
 
@@ -765,6 +762,8 @@ struct nfs4_opendata {
        struct nfs_openres o_res;
        struct nfs_open_confirmargs c_arg;
        struct nfs_open_confirmres c_res;
+       struct nfs4_string owner_name;
+       struct nfs4_string group_name;
        struct nfs_fattr f_attr;
        struct nfs_fattr dir_attr;
        struct dentry *dir;
@@ -788,6 +787,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
        p->o_res.server = p->o_arg.server;
        nfs_fattr_init(&p->f_attr);
        nfs_fattr_init(&p->dir_attr);
+       nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
 }
 
 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
@@ -819,6 +819,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.name = &dentry->d_name;
        p->o_arg.server = server;
        p->o_arg.bitmask = server->attr_bitmask;
+       p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
        p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
        if (flags & O_CREAT) {
                u32 *s;
@@ -855,6 +856,7 @@ static void nfs4_opendata_free(struct kref *kref)
        dput(p->dir);
        dput(p->dentry);
        nfs_sb_deactive(sb);
+       nfs_fattr_free_names(&p->f_attr);
        kfree(p);
 }
 
@@ -1579,6 +1581,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
        if (status != 0 || !data->rpc_done)
                return status;
 
+       nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
+
        nfs_refresh_inode(dir, o_res->dir_attr);
 
        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1611,6 +1615,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
                return status;
        }
 
+       nfs_fattr_map_and_free_names(server, &data->f_attr);
+
        if (o_arg->open_flags & O_CREAT) {
                update_changeattr(dir, &o_res->cinfo);
                nfs_post_op_update_inode(dir, o_res->dir_attr);
@@ -3431,19 +3437,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
  */
 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
 
-static void buf_to_pages(const void *buf, size_t buflen,
-               struct page **pages, unsigned int *pgbase)
-{
-       const void *p = buf;
-
-       *pgbase = offset_in_page(buf);
-       p -= *pgbase;
-       while (p < buf + buflen) {
-               *(pages++) = virt_to_page(p);
-               p += PAGE_CACHE_SIZE;
-       }
-}
-
 static int buf_to_pages_noslab(const void *buf, size_t buflen,
                struct page **pages, unsigned int *pgbase)
 {
@@ -3540,9 +3533,19 @@ out:
        nfs4_set_cached_acl(inode, acl);
 }
 
+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf.  On a NULL buf, we send a page of data to the server
+ * guessing that the ACL request can be serviced by a page. If so, we cache
+ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
+ * the cache. If not so, we throw away the page, and cache the required
+ * length. The next getxattr call will then produce another round trip to
+ * the server, this time with the input buf of the required size.
+ */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-       struct page *pages[NFS4ACL_MAXPAGES];
+       struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
        struct nfs_getaclargs args = {
                .fh = NFS_FH(inode),
                .acl_pages = pages,
@@ -3557,41 +3560,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       struct page *localpage = NULL;
-       int ret;
+       int ret = -ENOMEM, npages, i, acl_len = 0;
 
-       if (buflen < PAGE_SIZE) {
-               /* As long as we're doing a round trip to the server anyway,
-                * let's be prepared for a page of acl data. */
-               localpage = alloc_page(GFP_KERNEL);
-               resp_buf = page_address(localpage);
-               if (localpage == NULL)
-                       return -ENOMEM;
-               args.acl_pages[0] = localpage;
-               args.acl_pgbase = 0;
-               args.acl_len = PAGE_SIZE;
-       } else {
-               resp_buf = buf;
-               buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+       npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       /* As long as we're doing a round trip to the server anyway,
+        * let's be prepared for a page of acl data. */
+       if (npages == 0)
+               npages = 1;
+
+       for (i = 0; i < npages; i++) {
+               pages[i] = alloc_page(GFP_KERNEL);
+               if (!pages[i])
+                       goto out_free;
        }
-       ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+       if (npages > 1) {
+               /* for decoding across pages */
+               args.acl_scratch = alloc_page(GFP_KERNEL);
+               if (!args.acl_scratch)
+                       goto out_free;
+       }
+       args.acl_len = npages * PAGE_SIZE;
+       args.acl_pgbase = 0;
+       /* Let decode_getfacl know not to fail if the ACL data is larger than
+        * the page we send as a guess */
+       if (buf == NULL)
+               res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+       resp_buf = page_address(pages[0]);
+
+       dprintk("%s  buf %p buflen %ld npages %d args.acl_len %ld\n",
+               __func__, buf, buflen, npages, args.acl_len);
+       ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+                            &msg, &args.seq_args, &res.seq_res, 0);
        if (ret)
                goto out_free;
-       if (res.acl_len > args.acl_len)
-               nfs4_write_cached_acl(inode, NULL, res.acl_len);
+
+       acl_len = res.acl_len - res.acl_data_offset;
+       if (acl_len > args.acl_len)
+               nfs4_write_cached_acl(inode, NULL, acl_len);
        else
-               nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
+               nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+                                     acl_len);
        if (buf) {
                ret = -ERANGE;
-               if (res.acl_len > buflen)
+               if (acl_len > buflen)
                        goto out_free;
-               if (localpage)
-                       memcpy(buf, resp_buf, res.acl_len);
+               _copy_from_pages(buf, pages, res.acl_data_offset,
+                               res.acl_len);
        }
-       ret = res.acl_len;
+       ret = acl_len;
 out_free:
-       if (localpage)
-               __free_page(localpage);
+       for (i = 0; i < npages; i++)
+               if (pages[i])
+                       __free_page(pages[i]);
+       if (args.acl_scratch)
+               __free_page(args.acl_scratch);
        return ret;
 }
 
@@ -3622,6 +3644,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
                nfs_zap_acl_cache(inode);
        ret = nfs4_read_cached_acl(inode, buf, buflen);
        if (ret != -ENOENT)
+               /* -ENOENT is returned if there is no ACL or if there is an ACL
+                * but no cached acl data, just the acl length */
                return ret;
        return nfs4_get_acl_uncached(inode, buf, buflen);
 }
@@ -5022,23 +5046,6 @@ out:
        return ret;
 }
 
-/*
- * Reset the forechannel and backchannel slot tables
- */
-static int nfs4_reset_slot_tables(struct nfs4_session *session)
-{
-       int status;
-
-       status = nfs4_reset_slot_table(&session->fc_slot_table,
-                       session->fc_attrs.max_reqs, 1);
-       if (status)
-               return status;
-
-       status = nfs4_reset_slot_table(&session->bc_slot_table,
-                       session->bc_attrs.max_reqs, 0);
-       return status;
-}
-
 /* Destroy the slot table */
 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
 {
@@ -5084,29 +5091,35 @@ out:
 }
 
 /*
- * Initialize the forechannel and backchannel tables
+ * Initialize or reset the forechannel and backchannel tables
  */
-static int nfs4_init_slot_tables(struct nfs4_session *session)
+static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
 {
        struct nfs4_slot_table *tbl;
-       int status = 0;
+       int status;
 
-       tbl = &session->fc_slot_table;
+       dprintk("--> %s\n", __func__);
+       /* Fore channel */
+       tbl = &ses->fc_slot_table;
        if (tbl->slots == NULL) {
-               status = nfs4_init_slot_table(tbl,
-                               session->fc_attrs.max_reqs, 1);
+               status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
+               if (status) /* -ENOMEM */
+                       return status;
+       } else {
+               status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
                if (status)
                        return status;
        }
-
-       tbl = &session->bc_slot_table;
+       /* Back channel */
+       tbl = &ses->bc_slot_table;
        if (tbl->slots == NULL) {
-               status = nfs4_init_slot_table(tbl,
-                               session->bc_attrs.max_reqs, 0);
+               status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
                if (status)
-                       nfs4_destroy_slot_tables(session);
-       }
-
+                       /* Fore and back channel share a connection so get
+                        * both slot tables or neither */
+                       nfs4_destroy_slot_tables(ses);
+       } else
+               status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
        return status;
 }
 
@@ -5294,13 +5307,9 @@ int nfs4_proc_create_session(struct nfs_client *clp)
        if (status)
                goto out;
 
-       /* Init and reset the fore channel */
-       status = nfs4_init_slot_tables(session);
-       dprintk("slot table initialization returned %d\n", status);
-       if (status)
-               goto out;
-       status = nfs4_reset_slot_tables(session);
-       dprintk("slot table reset returned %d\n", status);
+       /* Init or reset the session slot tables */
+       status = nfs4_setup_session_slot_tables(session);
+       dprintk("slot table setup returned %d\n", status);
        if (status)
                goto out;
 
index 6a7107ae6b72d407bf95c4953bd1017e4704f63f..a53f33b4ac3a5cf333afe80b358f27fe4130289f 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/ratelimit.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
+#include <linux/jiffies.h>
 
 #include "nfs4_fs.h"
 #include "callback.h"
@@ -377,31 +378,24 @@ nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
        struct rb_node **p = &server->state_owners.rb_node,
                       *parent = NULL;
-       struct nfs4_state_owner *sp, *res = NULL;
+       struct nfs4_state_owner *sp;
 
        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
-               if (server < sp->so_server) {
-                       p = &parent->rb_left;
-                       continue;
-               }
-               if (server > sp->so_server) {
-                       p = &parent->rb_right;
-                       continue;
-               }
                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
+                       if (!list_empty(&sp->so_lru))
+                               list_del_init(&sp->so_lru);
                        atomic_inc(&sp->so_count);
-                       res = sp;
-                       break;
+                       return sp;
                }
        }
-       return res;
+       return NULL;
 }
 
 static struct nfs4_state_owner *
@@ -421,6 +415,8 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
+                       if (!list_empty(&sp->so_lru))
+                               list_del_init(&sp->so_lru);
                        atomic_inc(&sp->so_count);
                        return sp;
                }
@@ -462,6 +458,7 @@ nfs4_alloc_state_owner(void)
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
+       INIT_LIST_HEAD(&sp->so_lru);
        return sp;
 }
 
@@ -479,6 +476,38 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp)
        }
 }
 
+static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
+{
+       rpc_destroy_wait_queue(&sp->so_sequence.wait);
+       put_rpccred(sp->so_cred);
+       kfree(sp);
+}
+
+static void nfs4_gc_state_owners(struct nfs_server *server)
+{
+       struct nfs_client *clp = server->nfs_client;
+       struct nfs4_state_owner *sp, *tmp;
+       unsigned long time_min, time_max;
+       LIST_HEAD(doomed);
+
+       spin_lock(&clp->cl_lock);
+       time_max = jiffies;
+       time_min = (long)time_max - (long)clp->cl_lease_time;
+       list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+               /* NB: LRU is sorted so that oldest is at the head */
+               if (time_in_range(sp->so_expires, time_min, time_max))
+                       break;
+               list_move(&sp->so_lru, &doomed);
+               nfs4_remove_state_owner_locked(sp);
+       }
+       spin_unlock(&clp->cl_lock);
+
+       list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+               list_del(&sp->so_lru);
+               nfs4_free_state_owner(sp);
+       }
+}
+
 /**
  * nfs4_get_state_owner - Look up a state owner given a credential
  * @server: nfs_server to search
@@ -496,10 +525,10 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
        sp = nfs4_find_state_owner_locked(server, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
-               return sp;
+               goto out;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
-               return NULL;
+               goto out;
        new->so_server = server;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
@@ -511,26 +540,58 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
                rpc_destroy_wait_queue(&new->so_sequence.wait);
                kfree(new);
        }
+out:
+       nfs4_gc_state_owners(server);
        return sp;
 }
 
 /**
  * nfs4_put_state_owner - Release a nfs4_state_owner
  * @sp: state owner data to release
- *
  */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
-       struct nfs_client *clp = sp->so_server->nfs_client;
-       struct rpc_cred *cred = sp->so_cred;
+       struct nfs_server *server = sp->so_server;
+       struct nfs_client *clp = server->nfs_client;
 
        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
-       nfs4_remove_state_owner_locked(sp);
+
+       if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+               sp->so_expires = jiffies;
+               list_add_tail(&sp->so_lru, &server->state_owners_lru);
+               spin_unlock(&clp->cl_lock);
+       } else {
+               nfs4_remove_state_owner_locked(sp);
+               spin_unlock(&clp->cl_lock);
+               nfs4_free_state_owner(sp);
+       }
+}
+
+/**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @server: nfs_server with cached state owners to release
+ *
+ * Called at umount time.  Remaining state owners will be on
+ * the LRU with ref count of zero.
+ */
+void nfs4_purge_state_owners(struct nfs_server *server)
+{
+       struct nfs_client *clp = server->nfs_client;
+       struct nfs4_state_owner *sp, *tmp;
+       LIST_HEAD(doomed);
+
+       spin_lock(&clp->cl_lock);
+       list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+               list_move(&sp->so_lru, &doomed);
+               nfs4_remove_state_owner_locked(sp);
+       }
        spin_unlock(&clp->cl_lock);
-       rpc_destroy_wait_queue(&sp->so_sequence.wait);
-       put_rpccred(cred);
-       kfree(sp);
+
+       list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+               list_del(&sp->so_lru);
+               nfs4_free_state_owner(sp);
+       }
 }
 
 static struct nfs4_state *
@@ -1402,6 +1463,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+               nfs4_purge_state_owners(server);
                spin_lock(&clp->cl_lock);
                for (pos = rb_first(&server->state_owners);
                     pos != NULL;
index e6161b213ed122d04dfe3e5fe7dfdc25e80a2c2e..95e92e438407c8ea39d2a6a717faeb919dd14819 100644 (file)
@@ -2298,7 +2298,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_getfh(xdr, &hdr);
        encode_getfattr(xdr, args->bitmask, &hdr);
        encode_restorefh(xdr, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
+       encode_getfattr(xdr, args->dir_bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
-       replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
+       replen = hdr.replen + op_decode_hdr_maxsz + 1;
        encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
        xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
                args->acl_pages, args->acl_pgbase, args->acl_len);
+       xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+
        encode_nops(&hdr);
 }
 
@@ -3790,7 +3792,8 @@ out_overflow:
 }
 
 static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
-               const struct nfs_server *server, uint32_t *uid, int may_sleep)
+               const struct nfs_server *server, uint32_t *uid,
+               struct nfs4_string *owner_name)
 {
        uint32_t len;
        __be32 *p;
@@ -3807,8 +3810,12 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
                p = xdr_inline_decode(xdr, len);
                if (unlikely(!p))
                        goto out_overflow;
-               if (!may_sleep) {
-                       /* do nothing */
+               if (owner_name != NULL) {
+                       owner_name->data = kmemdup(p, len, GFP_NOWAIT);
+                       if (owner_name->data != NULL) {
+                               owner_name->len = len;
+                               ret = NFS_ATTR_FATTR_OWNER_NAME;
+                       }
                } else if (len < XDR_MAX_NETOBJ) {
                        if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
                                ret = NFS_ATTR_FATTR_OWNER;
@@ -3828,7 +3835,8 @@ out_overflow:
 }
 
 static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
-               const struct nfs_server *server, uint32_t *gid, int may_sleep)
+               const struct nfs_server *server, uint32_t *gid,
+               struct nfs4_string *group_name)
 {
        uint32_t len;
        __be32 *p;
@@ -3845,8 +3853,12 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
                p = xdr_inline_decode(xdr, len);
                if (unlikely(!p))
                        goto out_overflow;
-               if (!may_sleep) {
-                       /* do nothing */
+               if (group_name != NULL) {
+                       group_name->data = kmemdup(p, len, GFP_NOWAIT);
+                       if (group_name->data != NULL) {
+                               group_name->len = len;
+                               ret = NFS_ATTR_FATTR_GROUP_NAME;
+                       }
                } else if (len < XDR_MAX_NETOBJ) {
                        if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
                                ret = NFS_ATTR_FATTR_GROUP;
@@ -4283,7 +4295,7 @@ xdr_error:
 
 static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                struct nfs_fattr *fattr, struct nfs_fh *fh,
-               const struct nfs_server *server, int may_sleep)
+               const struct nfs_server *server)
 {
        int status;
        umode_t fmode = 0;
@@ -4350,12 +4362,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                goto xdr_error;
        fattr->valid |= status;
 
-       status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep);
+       status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, fattr->owner_name);
        if (status < 0)
                goto xdr_error;
        fattr->valid |= status;
 
-       status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep);
+       status = decode_attr_group(xdr, bitmap, server, &fattr->gid, fattr->group_name);
        if (status < 0)
                goto xdr_error;
        fattr->valid |= status;
@@ -4396,7 +4408,7 @@ xdr_error:
 }
 
 static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
-               struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
+               struct nfs_fh *fh, const struct nfs_server *server)
 {
        __be32 *savep;
        uint32_t attrlen,
@@ -4415,7 +4427,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat
        if (status < 0)
                goto xdr_error;
 
-       status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
+       status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server);
        if (status < 0)
                goto xdr_error;
 
@@ -4426,9 +4438,9 @@ xdr_error:
 }
 
 static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
-               const struct nfs_server *server, int may_sleep)
+               const struct nfs_server *server)
 {
-       return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
+       return decode_getfattr_generic(xdr, fattr, NULL, server);
 }
 
 /*
@@ -4957,17 +4969,18 @@ decode_restorefh(struct xdr_stream *xdr)
 }
 
 static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
-               size_t *acl_len)
+                        struct nfs_getaclres *res)
 {
-       __be32 *savep;
+       __be32 *savep, *bm_p;
        uint32_t attrlen,
                 bitmap[3] = {0};
        struct kvec *iov = req->rq_rcv_buf.head;
        int status;
 
-       *acl_len = 0;
+       res->acl_len = 0;
        if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
                goto out;
+       bm_p = xdr->p;
        if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
                goto out;
        if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4979,18 +4992,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                size_t hdrlen;
                u32 recvd;
 
+               /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+                * are stored with the acl data to handle the problem of
+                * variable length bitmaps.*/
+               xdr->p = bm_p;
+               res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+               res->acl_data_offset <<= 2;
+
                /* We ignore &savep and don't do consistency checks on
                 * the attr length.  Let userspace figure it out.... */
                hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+               attrlen += res->acl_data_offset;
                recvd = req->rq_rcv_buf.len - hdrlen;
                if (attrlen > recvd) {
-                       dprintk("NFS: server cheating in getattr"
-                                       " acl reply: attrlen %u > recvd %u\n",
+                       if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+                               /* getxattr interface called with a NULL buf */
+                               res->acl_len = attrlen;
+                               goto out;
+                       }
+                       dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
                                        attrlen, recvd);
                        return -EINVAL;
                }
                xdr_read_pages(xdr, attrlen);
-               *acl_len = attrlen;
+               res->acl_len = attrlen;
        } else
                status = -EOPNOTSUPP;
 
@@ -5696,8 +5721,7 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
        status = decode_open_downgrade(xdr, res);
        if (status != 0)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -5723,8 +5747,7 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_access(xdr, res);
        if (status != 0)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -5753,8 +5776,7 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_getfh(xdr, res->fh);
        if (status)
                goto out;
-       status = decode_getfattr(xdr, res->fattr, res->server
-                       ,!RPC_IS_ASYNC(rqstp->rq_task));
+       status = decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -5780,8 +5802,7 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
                goto out;
        status = decode_getfh(xdr, res->fh);
        if (status == 0)
-               status = decode_getfattr(xdr, res->fattr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task));
+               status = decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -5807,8 +5828,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_remove(xdr, &res->cinfo);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->dir_attr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->dir_attr, res->server);
 out:
        return status;
 }
@@ -5841,14 +5861,12 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        /* Current FH is target directory */
-       if (decode_getfattr(xdr, res->new_fattr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+       if (decode_getfattr(xdr, res->new_fattr, res->server))
                goto out;
        status = decode_restorefh(xdr);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->old_fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->old_fattr, res->server);
 out:
        return status;
 }
@@ -5884,14 +5902,12 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
         * Note order: OP_LINK leaves the directory as the current
         *             filehandle.
         */
-       if (decode_getfattr(xdr, res->dir_attr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+       if (decode_getfattr(xdr, res->dir_attr, res->server))
                goto out;
        status = decode_restorefh(xdr);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -5923,14 +5939,12 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_getfh(xdr, res->fh);
        if (status)
                goto out;
-       if (decode_getfattr(xdr, res->fattr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+       if (decode_getfattr(xdr, res->fattr, res->server))
                goto out;
        status = decode_restorefh(xdr);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->dir_fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->dir_fattr, res->server);
 out:
        return status;
 }
@@ -5962,8 +5976,7 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_putfh(xdr);
        if (status)
                goto out;
-       status = decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       status = decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6028,7 +6041,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_putfh(xdr);
        if (status)
                goto out;
-       status = decode_getacl(xdr, rqstp, &res->acl_len);
+       status = decode_getacl(xdr, rqstp, res);
 
 out:
        return status;
@@ -6061,8 +6074,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
         *      an ESTALE error. Shouldn't be a problem,
         *      though, since fattr->valid will remain unset.
         */
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6093,13 +6105,11 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                goto out;
        if (decode_getfh(xdr, &res->fh) != 0)
                goto out;
-       if (decode_getfattr(xdr, res->f_attr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+       if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
                goto out;
        if (decode_restorefh(xdr) != 0)
                goto out;
-       decode_getfattr(xdr, res->dir_attr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->dir_attr, res->server);
 out:
        return status;
 }
@@ -6147,8 +6157,7 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
        status = decode_open(xdr, res);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->f_attr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->f_attr, res->server);
 out:
        return status;
 }
@@ -6175,8 +6184,7 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
        status = decode_setattr(xdr);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6356,8 +6364,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        if (res->fattr)
-               decode_getfattr(xdr, res->fattr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task));
+               decode_getfattr(xdr, res->fattr, res->server);
        if (!status)
                status = res->count;
 out:
@@ -6386,8 +6393,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        if (res->fattr)
-               decode_getfattr(xdr, res->fattr, res->server,
-                               !RPC_IS_ASYNC(rqstp->rq_task));
+               decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6546,8 +6552,7 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
        status = decode_delegreturn(xdr);
        if (status != 0)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6576,8 +6581,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
                goto out;
        xdr_enter_page(xdr, PAGE_SIZE);
        status = decode_getfattr(xdr, &res->fs_locations->fattr,
-                                res->fs_locations->server,
-                                !RPC_IS_ASYNC(req->rq_task));
+                                res->fs_locations->server);
 out:
        return status;
 }
@@ -6826,8 +6830,7 @@ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
        status = decode_layoutcommit(xdr, rqstp, res);
        if (status)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server,
-                       !RPC_IS_ASYNC(rqstp->rq_task));
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6958,7 +6961,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
                goto out_overflow;
 
        if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
-                                       entry->server, 1) < 0)
+                                       entry->server) < 0)
                goto out_overflow;
        if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
                entry->ino = entry->fattr->mounted_on_fileid;
index c807ab93140e519135f76ab88318a6cb68212978..55d01280a6098264cc5e6d7133c72347e392d109 100644 (file)
@@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
 static struct pnfs_layoutdriver_type objlayout_type = {
        .id = LAYOUT_OSD2_OBJECTS,
        .name = "LAYOUT_OSD2_OBJECTS",
-       .flags                   = PNFS_LAYOUTRET_ON_SETATTR,
+       .flags                   = PNFS_LAYOUTRET_ON_SETATTR |
+                                  PNFS_LAYOUTRET_ON_ERROR,
 
        .alloc_layout_hdr        = objlayout_alloc_layout_hdr,
        .free_layout_hdr         = objlayout_free_layout_hdr,
index 72074e3a04f922705151e38a586e7af5f2611f4a..b3c29039f5b893e69058cd404547d218cfa8dff8 100644 (file)
@@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
        oir->status = rdata->task.tk_status = status;
        if (status >= 0)
                rdata->res.count = status;
+       else
+               rdata->pnfs_error = status;
        objlayout_iodone(oir);
        /* must not use oir after this point */
 
@@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
        if (status >= 0) {
                wdata->res.count = status;
                wdata->verf.committed = oir->committed;
+       } else {
+               wdata->pnfs_error = status;
        }
        objlayout_iodone(oir);
        /* must not use oir after this point */
index 8e672a2b2d693193e8ca7252d70c73578d739e0f..17149a4900653af5326dfd6e0490e6e439b1426f 100644 (file)
@@ -1166,6 +1166,33 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
+static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+{
+       struct nfs_pageio_descriptor pgio;
+       LIST_HEAD(failed);
+
+       /* Resend all requests through the MDS */
+       nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+       while (!list_empty(head)) {
+               struct nfs_page *req = nfs_list_entry(head->next);
+
+               nfs_list_remove_request(req);
+               if (!nfs_pageio_add_request(&pgio, req))
+                       nfs_list_add_request(req, &failed);
+       }
+       nfs_pageio_complete(&pgio);
+
+       if (!list_empty(&failed)) {
+               /* For some reason our attempt to resend pages failed. Mark the
+                * overall send request as having failed, and let
+                * nfs_writeback_release_full deal with the error.
+                */
+               list_move(&failed, head);
+               return -EIO;
+       }
+       return 0;
+}
+
 /*
  * Called by non rpc-based layout drivers
  */
@@ -1175,9 +1202,17 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
                pnfs_set_layoutcommit(data);
                data->mds_ops->rpc_call_done(&data->task, data);
        } else {
-               put_lseg(data->lseg);
-               data->lseg = NULL;
                dprintk("pnfs write error = %d\n", data->pnfs_error);
+               if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+                                               PNFS_LAYOUTRET_ON_ERROR) {
+                       /* Don't lo_commit on error, the server will need to
+                        * perform a file recovery.
+                        */
+                       clear_bit(NFS_INO_LAYOUTCOMMIT,
+                                 &NFS_I(data->inode)->flags);
+                       pnfs_return_layout(data->inode);
+               }
+               data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
        }
        data->mds_ops->rpc_release(data);
 }
@@ -1267,6 +1302,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
        put_lseg(data->lseg);
        data->lseg = NULL;
        dprintk("pnfs write error = %d\n", data->pnfs_error);
+       if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+                                               PNFS_LAYOUTRET_ON_ERROR)
+               pnfs_return_layout(data->inode);
 
        nfs_pageio_init_read_mds(&pgio, data->inode);
 
index 1509530cb111fadfdd7b47bb48bbb9f88c4b1176..53d593a0a4f265a69c9f4fbc5d2ccb759291686a 100644 (file)
@@ -68,6 +68,7 @@ enum {
 enum layoutdriver_policy_flags {
        /* Should the pNFS client commit and return the layout upon a setattr */
        PNFS_LAYOUTRET_ON_SETATTR       = 1 << 0,
+       PNFS_LAYOUTRET_ON_ERROR         = 1 << 1,
 };
 
 struct nfs4_deviceid_node;
index e463967aafb8cf1d48be42e8c7b191cba607fbeb..3dfa4f112c0ab8be8d5b3f897173a68502406b6c 100644 (file)
@@ -908,10 +908,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
                data->auth_flavor_len   = 1;
                data->version           = version;
                data->minorversion      = 0;
+               security_init_mnt_opts(&data->lsm_opts);
        }
        return data;
 }
 
+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+       if (data) {
+               kfree(data->client_address);
+               kfree(data->mount_server.hostname);
+               kfree(data->nfs_server.export_path);
+               kfree(data->nfs_server.hostname);
+               kfree(data->fscache_uniq);
+               security_free_mnt_opts(&data->lsm_opts);
+               kfree(data);
+       }
+}
+
 /*
  * Sanity-check a server address provided by the mount command.
  *
@@ -2219,9 +2233,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
        mntfh = nfs_alloc_fhandle();
        if (data == NULL || mntfh == NULL)
-               goto out_free_fh;
-
-       security_init_mnt_opts(&data->lsm_opts);
+               goto out;
 
        /* Validate the mount data */
        error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@@ -2233,8 +2245,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 #ifdef CONFIG_NFS_V4
        if (data->version == 4) {
                mntroot = nfs4_try_mount(flags, dev_name, data);
-               kfree(data->client_address);
-               kfree(data->nfs_server.export_path);
                goto out;
        }
 #endif /* CONFIG_NFS_V4 */
@@ -2289,13 +2299,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        s->s_flags |= MS_ACTIVE;
 
 out:
-       kfree(data->nfs_server.hostname);
-       kfree(data->mount_server.hostname);
-       kfree(data->fscache_uniq);
-       security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
+       nfs_free_parsed_mount_data(data);
        nfs_free_fhandle(mntfh);
-       kfree(data);
        return mntroot;
 
 out_err_nosb:
@@ -2622,9 +2627,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 
        mntfh = nfs_alloc_fhandle();
        if (data == NULL || mntfh == NULL)
-               goto out_free_fh;
-
-       security_init_mnt_opts(&data->lsm_opts);
+               goto out;
 
        /* Get a volume representation */
        server = nfs4_create_server(data, mntfh);
@@ -2676,13 +2679,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 
        s->s_flags |= MS_ACTIVE;
 
-       security_free_mnt_opts(&data->lsm_opts);
        nfs_free_fhandle(mntfh);
        return mntroot;
 
 out:
-       security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
        nfs_free_fhandle(mntfh);
        return ERR_PTR(error);
 
@@ -2839,7 +2839,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
 
        data = nfs_alloc_parsed_mount_data(4);
        if (data == NULL)
-               goto out_free_data;
+               goto out;
 
        /* Validate the mount data */
        error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2853,12 +2853,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
                error = PTR_ERR(res);
 
 out:
-       kfree(data->client_address);
-       kfree(data->nfs_server.export_path);
-       kfree(data->nfs_server.hostname);
-       kfree(data->fscache_uniq);
-out_free_data:
-       kfree(data);
+       nfs_free_parsed_mount_data(data);
        dprintk("<-- nfs4_mount() = %d%s\n", error,
                        error != 0 ? " [error]" : "");
        return res;
index 1dda78db6a73256e8aeee11254e968f0547e4075..0c3885255f97cf985b8b8091dc5b2a2f0eebc59f 100644 (file)
@@ -1052,7 +1052,7 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
        .pg_doio = nfs_generic_pg_writepages,
 };
 
-static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
                                  struct inode *inode, int ioflags)
 {
        nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
@@ -1166,13 +1166,7 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
 static void nfs_writeback_release_full(void *calldata)
 {
        struct nfs_write_data   *data = calldata;
-       int ret, status = data->task.tk_status;
-       struct nfs_pageio_descriptor pgio;
-
-       if (data->pnfs_error) {
-               nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
-               pgio.pg_recoalesce = 1;
-       }
+       int status = data->task.tk_status;
 
        /* Update attributes as result of writeback. */
        while (!list_empty(&data->pages)) {
@@ -1188,11 +1182,6 @@ static void nfs_writeback_release_full(void *calldata)
                        req->wb_bytes,
                        (long long)req_offset(req));
 
-               if (data->pnfs_error) {
-                       dprintk(", pnfs error = %d\n", data->pnfs_error);
-                       goto next;
-               }
-
                if (status < 0) {
                        nfs_set_pageerror(page);
                        nfs_context_set_write_error(req->wb_context, status);
@@ -1212,19 +1201,7 @@ remove_request:
        next:
                nfs_clear_page_tag_locked(req);
                nfs_end_page_writeback(page);
-               if (data->pnfs_error) {
-                       lock_page(page);
-                       nfs_pageio_cond_complete(&pgio, page->index);
-                       ret = nfs_page_async_flush(&pgio, page, 0);
-                       if (ret) {
-                               nfs_set_pageerror(page);
-                               dprintk("rewrite to MDS error = %d\n", ret);
-                       }
-                       unlock_page(page);
-               }
        }
-       if (data->pnfs_error)
-               nfs_pageio_complete(&pgio);
        nfs_writedata_release(calldata);
 }
 
index 7748d6a18d9746b480745ba0313002a7d72b10ad..6f3ebb48b12fad4532884df525e8942bd3564c90 100644 (file)
@@ -718,7 +718,7 @@ int set_callback_cred(void)
 {
        if (callback_cred)
                return 0;
-       callback_cred = rpc_lookup_machine_cred();
+       callback_cred = rpc_lookup_machine_cred("nfs");
        if (!callback_cred)
                return -ENOMEM;
        return 0;
index a5ebe421195fea3df8d66f2dc3573534a3bc298a..286edf1e231f3598b0e33c77297bc8cf0d91ebe2 100644 (file)
@@ -827,8 +827,8 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
                goto out;
        }
 
-       rc = dlm_new_lockspace(conn->cc_name, strlen(conn->cc_name),
-                              &fsdlm, DLM_LSFL_FS, DLM_LVB_LEN);
+       rc = dlm_new_lockspace(conn->cc_name, NULL, DLM_LSFL_FS, DLM_LVB_LEN,
+                              NULL, NULL, NULL, &fsdlm);
        if (rc) {
                ocfs2_live_connection_drop(control);
                goto out;
index a1dddda999f208b2083582bcce9449f5a95e7a20..8173dfd89cb262ac506e64e475fa5c0cd9742c8e 100644 (file)
 #include <linux/pid_namespace.h>
 #include <linux/fs_struct.h>
 #include <linux/slab.h>
+#include <linux/flex_array.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
+#include <trace/events/oom.h>
 #include "internal.h"
 
 /* NOTE:
@@ -133,6 +135,8 @@ struct pid_entry {
                NULL, &proc_single_file_operations,     \
                { .proc_show = show } )
 
+static int proc_fd_permission(struct inode *inode, int mask);
+
 /*
  * Count the number of hardlinks for the pid_entry table, excluding the .
  * and .. links.
@@ -165,9 +169,9 @@ static int get_task_root(struct task_struct *task, struct path *root)
        return result;
 }
 
-static int proc_cwd_link(struct inode *inode, struct path *path)
+static int proc_cwd_link(struct dentry *dentry, struct path *path)
 {
-       struct task_struct *task = get_proc_task(inode);
+       struct task_struct *task = get_proc_task(dentry->d_inode);
        int result = -ENOENT;
 
        if (task) {
@@ -182,9 +186,9 @@ static int proc_cwd_link(struct inode *inode, struct path *path)
        return result;
 }
 
-static int proc_root_link(struct inode *inode, struct path *path)
+static int proc_root_link(struct dentry *dentry, struct path *path)
 {
-       struct task_struct *task = get_proc_task(inode);
+       struct task_struct *task = get_proc_task(dentry->d_inode);
        int result = -ENOENT;
 
        if (task) {
@@ -627,6 +631,50 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
        return 0;
 }
 
+/*
+ * May the current process learn task's sched/cmdline info (for hide_pid_min=1)
+ * or euid/egid (for hide_pid_min=2)?
+ */
+static bool has_pid_permissions(struct pid_namespace *pid,
+                                struct task_struct *task,
+                                int hide_pid_min)
+{
+       if (pid->hide_pid < hide_pid_min)
+               return true;
+       if (in_group_p(pid->pid_gid))
+               return true;
+       return ptrace_may_access(task, PTRACE_MODE_READ);
+}
+
+
+static int proc_pid_permission(struct inode *inode, int mask)
+{
+       struct pid_namespace *pid = inode->i_sb->s_fs_info;
+       struct task_struct *task;
+       bool has_perms;
+
+       task = get_proc_task(inode);
+       has_perms = has_pid_permissions(pid, task, 1);
+       put_task_struct(task);
+
+       if (!has_perms) {
+               if (pid->hide_pid == 2) {
+                       /*
+                        * Let's make getdents(), stat(), and open()
+                        * consistent with each other.  If a process
+                        * may not stat() a file, it shouldn't be seen
+                        * in procfs at all.
+                        */
+                       return -ENOENT;
+               }
+
+               return -EPERM;
+       }
+       return generic_permission(inode, mask);
+}
+
+
+
 static const struct inode_operations proc_def_inode_operations = {
        .setattr        = proc_setattr,
 };
@@ -1010,6 +1058,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        else
                task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
                                                                -OOM_DISABLE;
+       trace_oom_score_adj_update(task);
 err_sighand:
        unlock_task_sighand(task, &flags);
 err_task_lock:
@@ -1097,6 +1146,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
        task->signal->oom_score_adj = oom_score_adj;
        if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
                task->signal->oom_score_adj_min = oom_score_adj;
+       trace_oom_score_adj_update(task);
        /*
         * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
         * always attainable.
@@ -1453,13 +1503,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
        .release        = single_release,
 };
 
-static int proc_exe_link(struct inode *inode, struct path *exe_path)
+static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
 {
        struct task_struct *task;
        struct mm_struct *mm;
        struct file *exe_file;
 
-       task = get_proc_task(inode);
+       task = get_proc_task(dentry->d_inode);
        if (!task)
                return -ENOENT;
        mm = get_task_mm(task);
@@ -1489,7 +1539,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
        if (!proc_fd_access_allowed(inode))
                goto out;
 
-       error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
+       error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
 out:
        return ERR_PTR(error);
 }
@@ -1528,7 +1578,7 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
        if (!proc_fd_access_allowed(inode))
                goto out;
 
-       error = PROC_I(inode)->op.proc_get_link(inode, &path);
+       error = PROC_I(inode)->op.proc_get_link(dentry, &path);
        if (error)
                goto out;
 
@@ -1609,6 +1659,7 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
        struct inode *inode = dentry->d_inode;
        struct task_struct *task;
        const struct cred *cred;
+       struct pid_namespace *pid = dentry->d_sb->s_fs_info;
 
        generic_fillattr(inode, stat);
 
@@ -1617,6 +1668,14 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
        stat->gid = 0;
        task = pid_task(proc_pid(inode), PIDTYPE_PID);
        if (task) {
+               if (!has_pid_permissions(pid, task, 2)) {
+                       rcu_read_unlock();
+                       /*
+                        * This doesn't prevent learning whether PID exists,
+                        * it only makes getattr() consistent with readdir().
+                        */
+                       return -ENOENT;
+               }
                if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
                    task_dumpable(task)) {
                        cred = __task_cred(task);
@@ -1820,9 +1879,9 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
        return -ENOENT;
 }
 
-static int proc_fd_link(struct inode *inode, struct path *path)
+static int proc_fd_link(struct dentry *dentry, struct path *path)
 {
-       return proc_fd_info(inode, path, NULL);
+       return proc_fd_info(dentry->d_inode, path, NULL);
 }
 
 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
@@ -2043,6 +2102,355 @@ static const struct file_operations proc_fd_operations = {
        .llseek         = default_llseek,
 };
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+
+/*
+ * dname_to_vma_addr - maps a dentry name into two unsigned longs
+ * which represent vma start and end addresses.
+ */
+static int dname_to_vma_addr(struct dentry *dentry,
+                            unsigned long *start, unsigned long *end)
+{
+       if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+       unsigned long vm_start, vm_end;
+       bool exact_vma_exists = false;
+       struct mm_struct *mm = NULL;
+       struct task_struct *task;
+       const struct cred *cred;
+       struct inode *inode;
+       int status = 0;
+
+       if (nd && nd->flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       if (!capable(CAP_SYS_ADMIN)) {
+               status = -EACCES;
+               goto out_notask;
+       }
+
+       inode = dentry->d_inode;
+       task = get_proc_task(inode);
+       if (!task)
+               goto out_notask;
+
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               goto out;
+
+       mm = get_task_mm(task);
+       if (!mm)
+               goto out;
+
+       if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
+               down_read(&mm->mmap_sem);
+               exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
+               up_read(&mm->mmap_sem);
+       }
+
+       mmput(mm);
+
+       if (exact_vma_exists) {
+               if (task_dumpable(task)) {
+                       rcu_read_lock();
+                       cred = __task_cred(task);
+                       inode->i_uid = cred->euid;
+                       inode->i_gid = cred->egid;
+                       rcu_read_unlock();
+               } else {
+                       inode->i_uid = 0;
+                       inode->i_gid = 0;
+               }
+               security_task_to_inode(task, inode);
+               status = 1;
+       }
+
+out:
+       put_task_struct(task);
+
+out_notask:
+       if (status <= 0)
+               d_drop(dentry);
+
+       return status;
+}
+
+static const struct dentry_operations tid_map_files_dentry_operations = {
+       .d_revalidate   = map_files_d_revalidate,
+       .d_delete       = pid_delete_dentry,
+};
+
+static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
+{
+       unsigned long vm_start, vm_end;
+       struct vm_area_struct *vma;
+       struct task_struct *task;
+       struct mm_struct *mm;
+       int rc;
+
+       rc = -ENOENT;
+       task = get_proc_task(dentry->d_inode);
+       if (!task)
+               goto out;
+
+       mm = get_task_mm(task);
+       put_task_struct(task);
+       if (!mm)
+               goto out;
+
+       rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
+       if (rc)
+               goto out_mmput;
+
+       down_read(&mm->mmap_sem);
+       vma = find_exact_vma(mm, vm_start, vm_end);
+       if (vma && vma->vm_file) {
+               *path = vma->vm_file->f_path;
+               path_get(path);
+               rc = 0;
+       }
+       up_read(&mm->mmap_sem);
+
+out_mmput:
+       mmput(mm);
+out:
+       return rc;
+}
+
+struct map_files_info {
+       struct file     *file;
+       unsigned long   len;
+       unsigned char   name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
+};
+
+static struct dentry *
+proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
+                          struct task_struct *task, const void *ptr)
+{
+       const struct file *file = ptr;
+       struct proc_inode *ei;
+       struct inode *inode;
+
+       if (!file)
+               return ERR_PTR(-ENOENT);
+
+       inode = proc_pid_make_inode(dir->i_sb, task);
+       if (!inode)
+               return ERR_PTR(-ENOENT);
+
+       ei = PROC_I(inode);
+       ei->op.proc_get_link = proc_map_files_get_link;
+
+       inode->i_op = &proc_pid_link_inode_operations;
+       inode->i_size = 64;
+       inode->i_mode = S_IFLNK;
+
+       if (file->f_mode & FMODE_READ)
+               inode->i_mode |= S_IRUSR;
+       if (file->f_mode & FMODE_WRITE)
+               inode->i_mode |= S_IWUSR;
+
+       d_set_d_op(dentry, &tid_map_files_dentry_operations);
+       d_add(dentry, inode);
+
+       return NULL;
+}
+
+static struct dentry *proc_map_files_lookup(struct inode *dir,
+               struct dentry *dentry, struct nameidata *nd)
+{
+       unsigned long vm_start, vm_end;
+       struct vm_area_struct *vma;
+       struct task_struct *task;
+       struct dentry *result;
+       struct mm_struct *mm;
+
+       result = ERR_PTR(-EACCES);
+       if (!capable(CAP_SYS_ADMIN))
+               goto out;
+
+       result = ERR_PTR(-ENOENT);
+       task = get_proc_task(dir);
+       if (!task)
+               goto out;
+
+       result = ERR_PTR(-EACCES);
+       if (lock_trace(task))
+               goto out_put_task;
+
+       result = ERR_PTR(-ENOENT);
+       if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
+               goto out_unlock;
+
+       mm = get_task_mm(task);
+       if (!mm)
+               goto out_unlock;
+
+       down_read(&mm->mmap_sem);
+       vma = find_exact_vma(mm, vm_start, vm_end);
+       if (!vma)
+               goto out_no_vma;
+
+       result = proc_map_files_instantiate(dir, dentry, task, vma->vm_file);
+
+out_no_vma:
+       up_read(&mm->mmap_sem);
+       mmput(mm);
+out_unlock:
+       unlock_trace(task);
+out_put_task:
+       put_task_struct(task);
+out:
+       return result;
+}
+
+static const struct inode_operations proc_map_files_inode_operations = {
+       .lookup         = proc_map_files_lookup,
+       .permission     = proc_fd_permission,
+       .setattr        = proc_setattr,
+};
+
+static int
+proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+       struct dentry *dentry = filp->f_path.dentry;
+       struct inode *inode = dentry->d_inode;
+       struct vm_area_struct *vma;
+       struct task_struct *task;
+       struct mm_struct *mm;
+       ino_t ino;
+       int ret;
+
+       ret = -EACCES;
+       if (!capable(CAP_SYS_ADMIN))
+               goto out;
+
+       ret = -ENOENT;
+       task = get_proc_task(inode);
+       if (!task)
+               goto out;
+
+       ret = -EACCES;
+       if (lock_trace(task))
+               goto out_put_task;
+
+       ret = 0;
+       switch (filp->f_pos) {
+       case 0:
+               ino = inode->i_ino;
+               if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
+                       goto out_unlock;
+               filp->f_pos++;
+       case 1:
+               ino = parent_ino(dentry);
+               if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+                       goto out_unlock;
+               filp->f_pos++;
+       default:
+       {
+               unsigned long nr_files, pos, i;
+               struct flex_array *fa = NULL;
+               struct map_files_info info;
+               struct map_files_info *p;
+
+               mm = get_task_mm(task);
+               if (!mm)
+                       goto out_unlock;
+               down_read(&mm->mmap_sem);
+
+               nr_files = 0;
+
+               /*
+                * We need two passes here:
+                *
+                *  1) Collect vmas of mapped files with mmap_sem taken
+                *  2) Release mmap_sem and instantiate entries
+                *
+                * otherwise lockdep complains, since filldir()
+                * routine might require mmap_sem taken in might_fault().
+                */
+
+               for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
+                       if (vma->vm_file && ++pos > filp->f_pos)
+                               nr_files++;
+               }
+
+               if (nr_files) {
+                       fa = flex_array_alloc(sizeof(info), nr_files,
+                                               GFP_KERNEL);
+                       if (!fa || flex_array_prealloc(fa, 0, nr_files,
+                                                       GFP_KERNEL)) {
+                               ret = -ENOMEM;
+                               if (fa)
+                                       flex_array_free(fa);
+                               up_read(&mm->mmap_sem);
+                               mmput(mm);
+                               goto out_unlock;
+                       }
+                       for (i = 0, vma = mm->mmap, pos = 2; vma;
+                                       vma = vma->vm_next) {
+                               if (!vma->vm_file)
+                                       continue;
+                               if (++pos <= filp->f_pos)
+                                       continue;
+
+                               get_file(vma->vm_file);
+                               info.file = vma->vm_file;
+                               info.len = snprintf(info.name,
+                                               sizeof(info.name), "%lx-%lx",
+                                               vma->vm_start, vma->vm_end);
+                               if (flex_array_put(fa, i++, &info, GFP_KERNEL))
+                                       BUG();
+                       }
+               }
+               up_read(&mm->mmap_sem);
+
+               for (i = 0; i < nr_files; i++) {
+                       p = flex_array_get(fa, i);
+                       ret = proc_fill_cache(filp, dirent, filldir,
+                                             p->name, p->len,
+                                             proc_map_files_instantiate,
+                                             task, p->file);
+                       if (ret)
+                               break;
+                       filp->f_pos++;
+                       fput(p->file);
+               }
+               for (; i < nr_files; i++) {
+                       /*
+                        * In case of error don't forget
+                        * to put rest of file refs.
+                        */
+                       p = flex_array_get(fa, i);
+                       fput(p->file);
+               }
+               if (fa)
+                       flex_array_free(fa);
+               mmput(mm);
+       }
+       }
+
+out_unlock:
+       unlock_trace(task);
+out_put_task:
+       put_task_struct(task);
+out:
+       return ret;
+}
+
+static const struct file_operations proc_map_files_operations = {
+       .read           = generic_read_dir,
+       .readdir        = proc_map_files_readdir,
+       .llseek         = default_llseek,
+};
+
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+
 /*
  * /proc/pid/fd needs a special permission handler so that a process can still
  * access /proc/self/fd after it has executed a setuid().
@@ -2658,6 +3066,9 @@ static const struct inode_operations proc_task_inode_operations;
 static const struct pid_entry tgid_base_stuff[] = {
        DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
        DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+       DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
+#endif
        DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
        DIR("ns",         S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
 #ifdef CONFIG_NET
@@ -2761,6 +3172,7 @@ static const struct inode_operations proc_tgid_base_inode_operations = {
        .lookup         = proc_tgid_base_lookup,
        .getattr        = pid_getattr,
        .setattr        = proc_setattr,
+       .permission     = proc_pid_permission,
 };
 
 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
@@ -2964,6 +3376,12 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
                                proc_pid_instantiate, iter.task, NULL);
 }
 
+static int fake_filldir(void *buf, const char *name, int namelen,
+                       loff_t offset, u64 ino, unsigned d_type)
+{
+       return 0;
+}
+
 /* for the /proc/ directory itself, after non-process stuff has been done */
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
@@ -2971,6 +3389,7 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
        struct task_struct *reaper;
        struct tgid_iter iter;
        struct pid_namespace *ns;
+       filldir_t __filldir;
 
        if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
                goto out_no_task;
@@ -2992,8 +3411,13 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
        for (iter = next_tgid(ns, iter);
             iter.task;
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
+               if (has_pid_permissions(ns, iter.task, 2))
+                       __filldir = filldir;
+               else
+                       __filldir = fake_filldir;
+
                filp->f_pos = iter.tgid + TGID_OFFSET;
-               if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
+               if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
                        put_task_struct(iter.task);
                        goto out;
                }
@@ -3328,6 +3752,7 @@ static const struct inode_operations proc_task_inode_operations = {
        .lookup         = proc_task_lookup,
        .getattr        = proc_task_getattr,
        .setattr        = proc_setattr,
+       .permission     = proc_pid_permission,
 };
 
 static const struct file_operations proc_task_operations = {
index 51a176622b8fda3b3867381302acc3ac200fb4ae..84fd3235a5902b73eb59dbc9d18d42be4969b3a3 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/time.h>
 #include <linux/proc_fs.h>
 #include <linux/kernel.h>
+#include <linux/pid_namespace.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/stat.h>
@@ -17,7 +18,9 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/mount.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -101,12 +104,27 @@ void __init proc_init_inodecache(void)
                                             init_once);
 }
 
+static int proc_show_options(struct seq_file *seq, struct dentry *root)
+{
+       struct super_block *sb = root->d_sb;
+       struct pid_namespace *pid = sb->s_fs_info;
+
+       if (pid->pid_gid)
+               seq_printf(seq, ",gid=%lu", (unsigned long)pid->pid_gid);
+       if (pid->hide_pid != 0)
+               seq_printf(seq, ",hidepid=%u", pid->hide_pid);
+
+       return 0;
+}
+
 static const struct super_operations proc_sops = {
        .alloc_inode    = proc_alloc_inode,
        .destroy_inode  = proc_destroy_inode,
        .drop_inode     = generic_delete_inode,
        .evict_inode    = proc_evict_inode,
        .statfs         = simple_statfs,
+       .remount_fs     = proc_remount,
+       .show_options   = proc_show_options,
 };
 
 static void __pde_users_dec(struct proc_dir_entry *pde)
index 7838e5cfec145d4655d4af0e291a8d8bebad91cd..292577531ad13e665bfd94ff63dbfd5a206a5472 100644 (file)
@@ -117,6 +117,7 @@ void pde_put(struct proc_dir_entry *pde);
 
 int proc_fill_super(struct super_block *);
 struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
+int proc_remount(struct super_block *sb, int *flags, char *data);
 
 /*
  * These are generic /proc routines that use the internal
index 03102d978180eba68469ef01d13ad25465f5b796..46a15d8a29ca74d9ca0a68a57e416108e6b599f3 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bitops.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
+#include <linux/parser.h>
 
 #include "internal.h"
 
@@ -36,6 +37,63 @@ static int proc_set_super(struct super_block *sb, void *data)
        return err;
 }
 
+enum {
+       Opt_gid, Opt_hidepid, Opt_err,
+};
+
+static const match_table_t tokens = {
+       {Opt_hidepid, "hidepid=%u"},
+       {Opt_gid, "gid=%u"},
+       {Opt_err, NULL},
+};
+
+static int proc_parse_options(char *options, struct pid_namespace *pid)
+{
+       char *p;
+       substring_t args[MAX_OPT_ARGS];
+       int option;
+
+       if (!options)
+               return 1;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+               if (!*p)
+                       continue;
+
+               args[0].to = args[0].from = 0;
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_gid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       pid->pid_gid = option;
+                       break;
+               case Opt_hidepid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       if (option < 0 || option > 2) {
+                               pr_err("proc: hidepid value must be between 0 and 2.\n");
+                               return 0;
+                       }
+                       pid->hide_pid = option;
+                       break;
+               default:
+                       pr_err("proc: unrecognized mount option \"%s\" "
+                              "or missing value\n", p);
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
+int proc_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct pid_namespace *pid = sb->s_fs_info;
+       return !proc_parse_options(data, pid);
+}
+
 static struct dentry *proc_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
@@ -43,11 +101,15 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        struct super_block *sb;
        struct pid_namespace *ns;
        struct proc_inode *ei;
+       char *options;
 
-       if (flags & MS_KERNMOUNT)
+       if (flags & MS_KERNMOUNT) {
                ns = (struct pid_namespace *)data;
-       else
+               options = NULL;
+       } else {
                ns = current->nsproxy->pid_ns;
+               options = data;
+       }
 
        sb = sget(fs_type, proc_test_super, proc_set_super, ns);
        if (IS_ERR(sb))
@@ -55,6 +117,10 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
 
        if (!sb->s_root) {
                sb->s_flags = flags;
+               if (!proc_parse_options(options, ns)) {
+                       deactivate_locked_super(sb);
+                       return ERR_PTR(-EINVAL);
+               }
                err = proc_fill_super(sb);
                if (err) {
                        deactivate_locked_super(sb);
index a945cd265228684418cfb8a4fd1d3df15d2013d5..70de42f09f1d1cfcef2c78aaf7552b851cb00099 100644 (file)
@@ -1364,10 +1364,7 @@ int reiserfs_init_bitmap_cache(struct super_block *sb)
        struct reiserfs_bitmap_info *bitmap;
        unsigned int bmap_nr = reiserfs_bmap_count(sb);
 
-       /* Avoid lock recursion in fault case */
-       reiserfs_write_unlock(sb);
        bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
-       reiserfs_write_lock(sb);
        if (bitmap == NULL)
                return -ENOMEM;
 
index eb711060a6f2b771d4b4e68613ea4890273633dd..c3cf54fd4de327c343c0964488a89e8056665062 100644 (file)
@@ -2678,16 +2678,10 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
        char b[BDEVNAME_SIZE];
        int ret;
 
-       /*
-        * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS
-        * dependency inversion warnings.
-        */
-       reiserfs_write_unlock(sb);
        journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
        if (!journal) {
                reiserfs_warning(sb, "journal-1256",
                                 "unable to get memory for journal structure");
-               reiserfs_write_lock(sb);
                return 1;
        }
        INIT_LIST_HEAD(&journal->j_bitmap_nodes);
@@ -2695,10 +2689,8 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
        INIT_LIST_HEAD(&journal->j_working_list);
        INIT_LIST_HEAD(&journal->j_journal_list);
        journal->j_persistent_trans = 0;
-       ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
-                                          reiserfs_bmap_count(sb));
-       reiserfs_write_lock(sb);
-       if (ret)
+       if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
+                                          reiserfs_bmap_count(sb)))
                goto free_and_return;
 
        allocate_bitmap_nodes(sb);
@@ -2727,27 +2719,11 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
                goto free_and_return;
        }
 
-       /*
-        * We need to unlock here to avoid creating the following
-        * dependency:
-        * reiserfs_lock -> sysfs_mutex
-        * Because the reiserfs mmap path creates the following dependency:
-        * mm->mmap -> reiserfs_lock, hence we have
-        * mm->mmap -> reiserfs_lock ->sysfs_mutex
-        * This would ends up in a circular dependency with sysfs readdir path
-        * which does sysfs_mutex -> mm->mmap_sem
-        * This is fine because the reiserfs lock is useless in mount path,
-        * at least until we call journal_begin. We keep it for paranoid
-        * reasons.
-        */
-       reiserfs_write_unlock(sb);
        if (journal_init_dev(sb, journal, j_dev_name) != 0) {
-               reiserfs_write_lock(sb);
                reiserfs_warning(sb, "sh-462",
                                 "unable to initialize jornal device");
                goto free_and_return;
        }
-       reiserfs_write_lock(sb);
 
        rs = SB_DISK_SUPER_BLOCK(sb);
 
@@ -2829,9 +2805,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
        journal->j_mount_id = 10;
        journal->j_state = 0;
        atomic_set(&(journal->j_jlock), 0);
-       reiserfs_write_unlock(sb);
        journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
-       reiserfs_write_lock(sb);
        journal->j_cnode_free_orig = journal->j_cnode_free_list;
        journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
        journal->j_cnode_used = 0;
@@ -2848,24 +2822,37 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
 
        init_journal_hash(sb);
        jl = journal->j_current_jl;
+
+       /*
+        * get_list_bitmap() may call flush_commit_list() which
+        * requires the lock. Calling flush_commit_list() shouldn't happen
+        * this early but I like to be paranoid.
+        */
+       reiserfs_write_lock(sb);
        jl->j_list_bitmap = get_list_bitmap(sb, jl);
+       reiserfs_write_unlock(sb);
        if (!jl->j_list_bitmap) {
                reiserfs_warning(sb, "journal-2005",
                                 "get_list_bitmap failed for journal list 0");
                goto free_and_return;
        }
-       if (journal_read(sb) < 0) {
+
+       /*
+        * Journal_read needs to be inspected in order to push down
+        * the lock further inside (or even remove it).
+        */
+       reiserfs_write_lock(sb);
+       ret = journal_read(sb);
+       reiserfs_write_unlock(sb);
+       if (ret < 0) {
                reiserfs_warning(sb, "reiserfs-2006",
                                 "Replay Failure, unable to mount");
                goto free_and_return;
        }
 
        reiserfs_mounted_fs_count++;
-       if (reiserfs_mounted_fs_count <= 1) {
-               reiserfs_write_unlock(sb);
+       if (reiserfs_mounted_fs_count <= 1)
                commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
-               reiserfs_write_lock(sb);
-       }
 
        INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
        journal->j_work_sb = sb;
@@ -2896,14 +2883,13 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
            journal->j_cnode_free < (journal->j_trans_max * 3)) {
                return 1;
        }
-       /* protected by the BKL here */
+
        journal->j_len_alloc += new_alloc;
        th->t_blocks_allocated += new_alloc ;
        return 0;
 }
 
-/* this must be called inside a transaction, and requires the
-** kernel_lock to be held
+/* this must be called inside a transaction
 */
 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
 {
@@ -2914,8 +2900,7 @@ void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
        return;
 }
 
-/* this must be called without a transaction started, and does not
-** require BKL
+/* this must be called without a transaction started
 */
 void reiserfs_allow_writes(struct super_block *s)
 {
@@ -2924,8 +2909,7 @@ void reiserfs_allow_writes(struct super_block *s)
        wake_up(&journal->j_join_wait);
 }
 
-/* this must be called without a transaction started, and does not
-** require BKL
+/* this must be called without a transaction started
 */
 void reiserfs_wait_on_write_block(struct super_block *s)
 {
index 1d42e707d5fadc0e604beecf42085c168e628274..e12d8b97cd4dbf3c4c7dc249d42c66a8aeadc4bb 100644 (file)
@@ -1519,9 +1519,7 @@ static int read_super_block(struct super_block *s, int offset)
 static int reread_meta_blocks(struct super_block *s)
 {
        ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
-       reiserfs_write_unlock(s);
        wait_on_buffer(SB_BUFFER_WITH_SB(s));
-       reiserfs_write_lock(s);
        if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
                reiserfs_warning(s, "reiserfs-2504", "error reading the super");
                return 1;
@@ -1746,22 +1744,11 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        mutex_init(&REISERFS_SB(s)->lock);
        REISERFS_SB(s)->lock_depth = -1;
 
-       /*
-        * This function is called with the bkl, which also was the old
-        * locking used here.
-        * do_journal_begin() will soon check if we hold the lock (ie: was the
-        * bkl). This is likely because do_journal_begin() has several another
-        * callers because at this time, it doesn't seem to be necessary to
-        * protect against anything.
-        * Anyway, let's be conservative and lock for now.
-        */
-       reiserfs_write_lock(s);
-
        jdev_name = NULL;
        if (reiserfs_parse_options
            (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
             &commit_max_age, qf_names, &qfmt) == 0) {
-               goto error;
+               goto error_unlocked;
        }
        if (jdev_name && jdev_name[0]) {
                REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
@@ -1777,7 +1764,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 
        if (blocks) {
                SWARN(silent, s, "jmacd-7", "resize option for remount only");
-               goto error;
+               goto error_unlocked;
        }
 
        /* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */
@@ -1787,7 +1774,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
                SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
                      reiserfs_bdevname(s));
-               goto error;
+               goto error_unlocked;
        }
 
        rs = SB_DISK_SUPER_BLOCK(s);
@@ -1803,7 +1790,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                      "or increase size of your LVM partition");
                SWARN(silent, s, "", "Or may be you forgot to "
                      "reboot after fdisk when it told you to");
-               goto error;
+               goto error_unlocked;
        }
 
        sbi->s_mount_state = SB_REISERFS_STATE(s);
@@ -1811,8 +1798,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 
        if ((errval = reiserfs_init_bitmap_cache(s))) {
                SWARN(silent, s, "jmacd-8", "unable to read bitmap");
-               goto error;
+               goto error_unlocked;
        }
+
        errval = -EINVAL;
 #ifdef CONFIG_REISERFS_CHECK
        SWARN(silent, s, "", "CONFIG_REISERFS_CHECK is set ON");
@@ -1835,24 +1823,26 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        if (reiserfs_barrier_flush(s)) {
                printk("reiserfs: using flush barriers\n");
        }
+
        // set_device_ro(s->s_dev, 1) ;
        if (journal_init(s, jdev_name, old_format, commit_max_age)) {
                SWARN(silent, s, "sh-2022",
                      "unable to initialize journal space");
-               goto error;
+               goto error_unlocked;
        } else {
                jinit_done = 1; /* once this is set, journal_release must be called
                                 ** if we error out of the mount
                                 */
        }
+
        if (reread_meta_blocks(s)) {
                SWARN(silent, s, "jmacd-9",
                      "unable to reread meta blocks after journal init");
-               goto error;
+               goto error_unlocked;
        }
 
        if (replay_only(s))
-               goto error;
+               goto error_unlocked;
 
        if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
                SWARN(silent, s, "clm-7000",
@@ -1866,9 +1856,19 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                         reiserfs_init_locked_inode, (void *)(&args));
        if (!root_inode) {
                SWARN(silent, s, "jmacd-10", "get root inode failed");
-               goto error;
+               goto error_unlocked;
        }
 
+       /*
+        * This path was assumed to be called with the BKL in the old times.
+        * Now we have inherited the big reiserfs lock from it and many
+        * reiserfs helpers called in the mount path and elsewhere require
+        * this lock to be held even if it's not always necessary. Let's be
+        * conservative and hold it early. The window can be reduced after
+        * careful review of the code.
+        */
+       reiserfs_write_lock(s);
+
        if (root_inode->i_state & I_NEW) {
                reiserfs_read_locked_inode(root_inode, &args);
                unlock_new_inode(root_inode);
@@ -1995,12 +1995,16 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        return (0);
 
 error:
-       if (jinit_done) {       /* kill the commit thread, free journal ram */
+       reiserfs_write_unlock(s);
+
+error_unlocked:
+       /* kill the commit thread, free journal ram */
+       if (jinit_done) {
+               reiserfs_write_lock(s);
                journal_release_error(NULL, s);
+               reiserfs_write_unlock(s);
        }
 
-       reiserfs_write_unlock(s);
-
        reiserfs_free_bitmap_cache(s);
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
index eed99428f1046d6a3dbd6191340d9d70e18b7ce0..e1a7779dd3cb18e72276569f8d2b0ce77e53a42c 100644 (file)
@@ -28,9 +28,10 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
        struct inode *inode = file->f_mapping->host;
        struct mtd_info *mtd = inode->i_sb->s_mtd;
        unsigned long isize, offset, maxpages, lpages;
+       int ret;
 
        if (!mtd)
-               goto cant_map_directly;
+               return (unsigned long) -ENOSYS;
 
        /* the mapping mustn't extend beyond the EOF */
        lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -41,23 +42,20 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
        if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
                return (unsigned long) -EINVAL;
 
-       /* we need to call down to the MTD layer to do the actual mapping */
-       if (mtd->get_unmapped_area) {
-               if (addr != 0)
-                       return (unsigned long) -EINVAL;
-
-               if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
-                       return (unsigned long) -EINVAL;
+       if (addr != 0)
+               return (unsigned long) -EINVAL;
 
-               offset += ROMFS_I(inode)->i_dataoffset;
-               if (offset > mtd->size - len)
-                       return (unsigned long) -EINVAL;
+       if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+               return (unsigned long) -EINVAL;
 
-               return mtd->get_unmapped_area(mtd, len, offset, flags);
-       }
+       offset += ROMFS_I(inode)->i_dataoffset;
+       if (offset > mtd->size - len)
+               return (unsigned long) -EINVAL;
 
-cant_map_directly:
-       return (unsigned long) -ENOSYS;
+       ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+       if (ret == -EOPNOTSUPP)
+               ret = -ENOSYS;
+       return (unsigned long) ret;
 }
 
 /*
index 6189c74d97f03ef6ba3eafc19fc1a8924b42d4b9..66d59d0a14029ebc8b1fa3bb2d0ecfad38199e66 100644 (file)
@@ -1986,12 +1986,11 @@ again:
 
                                if (path[h].in_tree)
                                        continue;
-                               nnode = kmalloc(sz, GFP_NOFS);
+                               nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS);
                                if (!nnode) {
                                        err = -ENOMEM;
                                        goto out;
                                }
-                               memcpy(nnode, &path[h].nnode, sz);
                                parent = nnode->parent;
                                parent->nbranch[nnode->iip].nnode = nnode;
                                path[h].ptr.nnode = nnode;
@@ -2004,12 +2003,11 @@ again:
                                const size_t sz = sizeof(struct ubifs_pnode);
                                struct ubifs_nnode *parent;
 
-                               pnode = kmalloc(sz, GFP_NOFS);
+                               pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS);
                                if (!pnode) {
                                        err = -ENOMEM;
                                        goto out;
                                }
-                               memcpy(pnode, &path[h].pnode, sz);
                                parent = pnode->parent;
                                parent->nbranch[pnode->iip].pnode = pnode;
                                path[h].ptr.pnode = pnode;
index 066738647685ba8ad2d371060bfd226e12020e3b..e14ee53159db36b0ecade9b31ad6466678db6b74 100644 (file)
@@ -344,12 +344,11 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                return err;
        }
 
-       lnc_node = kmalloc(zbr->len, GFP_NOFS);
+       lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
        if (!lnc_node)
                /* We don't have to have the cache, so no error */
                return 0;
 
-       memcpy(lnc_node, node, zbr->len);
        zbr->leaf = lnc_node;
        return 0;
 }
index bf18f7a04544b28bb0fd989e33065ee17bb7a95e..85b2722687545b0e3299d7bdc0275ebe9fb2d2e2 100644 (file)
@@ -138,12 +138,11 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
        ui = ubifs_inode(inode);
        ui->xattr = 1;
        ui->flags |= UBIFS_XATTR_FL;
-       ui->data = kmalloc(size, GFP_NOFS);
+       ui->data = kmemdup(value, size, GFP_NOFS);
        if (!ui->data) {
                err = -ENOMEM;
                goto out_free;
        }
-       memcpy(ui->data, value, size);
        inode->i_size = ui->ui_size = size;
        ui->data_len = size;
 
@@ -204,12 +203,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
                return err;
 
        kfree(ui->data);
-       ui->data = kmalloc(size, GFP_NOFS);
+       ui->data = kmemdup(value, size, GFP_NOFS);
        if (!ui->data) {
                err = -ENOMEM;
                goto out_free;
        }
-       memcpy(ui->data, value, size);
        inode->i_size = ui->ui_size = size;
        ui->data_len = size;
 
index 912088773a69ef55b5313d7a6ccd5acebbc1de36..448303bdb85fd8009fdb6a6367fa9f2ed69b80ba 100644 (file)
@@ -19,6 +19,8 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm-generic/pci_iomap.h>
+
 #ifndef mmiowb
 #define mmiowb() do {} while (0)
 #endif
@@ -283,9 +285,7 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
 #define __io_virt(x) ((void __force *) (x))
 
 #ifndef CONFIG_GENERIC_IOMAP
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 {
 }
@@ -327,7 +327,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 #define ioremap_wc ioremap_nocache
 #endif
 
-static inline void iounmap(void *addr)
+static inline void iounmap(void __iomem *addr)
 {
 }
 #endif /* CONFIG_MMU */
index 98dcd76ce836db71243b031a2bdfc301627d7a03..8a3d4fde26040600f7ddb9d7c3e67a4807b265ff 100644 (file)
@@ -67,18 +67,15 @@ extern void ioport_unmap(void __iomem *);
 #endif
 
 #ifdef CONFIG_PCI
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 #else
 struct pci_dev;
-static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       return NULL;
-}
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 { }
 #endif
 
+#include <asm-generic/pci_iomap.h>
+
 #endif
index 351889d1de19282fdb10155d06d32301d148ba13..37d1fe28960a8ae7d82b2f804008b444c7fde556 100644 (file)
@@ -71,10 +71,14 @@ extern unsigned long memory_end;
 #define PAGE_OFFSET            (0)
 #endif
 
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET                (PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
 #ifndef __ASSEMBLY__
 
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
 
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
@@ -86,7 +90,7 @@ extern unsigned long memory_end;
 #define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 #endif
 
-#define pfn_valid(pfn)         ((pfn) < max_mapnr)
+#define pfn_valid(pfn)         ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 
 #define        virt_addr_valid(kaddr)  (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
                                ((void *)(kaddr) < (void *)memory_end))
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
new file mode 100644 (file)
index 0000000..8de4b73
--- /dev/null
@@ -0,0 +1,25 @@
+/* Generic PCI iomap helpers, based on MN10300 code
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_PCI_IOMAP_H
+#define __ASM_GENERIC_PCI_IOMAP_H
+
+struct pci_dev;
+#ifdef CONFIG_PCI
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+#else
+static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+       return NULL;
+}
+#endif
+
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
index ac68c999b6c2165bbba9bea374505cbc60f5abf9..9788568f79782befb0be7cf29d148220f9c898c4 100644 (file)
@@ -289,9 +289,14 @@ strncpy_from_user(char *dst, const char __user *src, long count)
  * Return 0 on exception, a value greater than N if too long
  */
 #ifndef __strnlen_user
-#define __strnlen_user strnlen
+#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
 #endif
 
+/*
+ * Unlike strnlen, strnlen_user includes the nul terminator in
+ * its returned count. Callers should check for a returned value
+ * greater than N as an indication the string is too long.
+ */
 static inline long strnlen_user(const char __user *src, long n)
 {
        if (!access_ok(VERIFY_READ, src, 1))
index 3a60ac88952001580be0e85b4dcd6c6b7c2462b5..a5c0e10fd47d4a93a3bbc49c04bbff4f686b08c9 100644 (file)
@@ -1,4 +1,5 @@
 header-y += drm.h
+header-y += drm_fourcc.h
 header-y += drm_mode.h
 header-y += drm_sarea.h
 header-y += i810_drm.h
index 4be33b4ca2f898a10296d3e2a6a4469942c6555a..49d94ede2ec2bc2612271c5a7f046c4d568802a9 100644 (file)
@@ -714,6 +714,10 @@ struct drm_get_cap {
 #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
 #define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
 #define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE        DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE        DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2          DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
 
 /**
  * Device specific ioctls should only be in their respective headers
index e8acca892af00a50ed38213103e306642091493e..76caa67c22e2629391a4124396723a94a5736e56 100644 (file)
@@ -918,7 +918,7 @@ struct drm_driver {
        int dev_priv_size;
        struct drm_ioctl_desc *ioctls;
        int num_ioctls;
-       struct file_operations fops;
+       const struct file_operations *fops;
        union {
                struct pci_driver *pci;
                struct platform_device *platform_device;
@@ -1696,5 +1696,13 @@ extern void drm_platform_exit(struct drm_driver *driver, struct platform_device
 extern int drm_get_platform_dev(struct platform_device *pdev,
                                struct drm_driver *driver);
 
+/* returns true if currently okay to sleep */
+static __inline__ bool drm_can_sleep(void)
+{
+       if (in_atomic() || in_dbg_master() || irqs_disabled())
+               return false;
+       return true;
+}
+
 #endif                         /* __KERNEL__ */
 #endif
index 8020798092820b82813e2c6026c8507d9dbe5923..63e4fce6728829bb98c9630220fc75f21d19814e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/idr.h>
-
 #include <linux/fb.h>
 
+#include <drm/drm_fourcc.h>
+
 struct drm_device;
 struct drm_mode_set;
 struct drm_framebuffer;
@@ -44,6 +45,7 @@ struct drm_framebuffer;
 #define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
 #define DRM_MODE_OBJECT_FB 0xfbfbfbfb
 #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
 
 struct drm_mode_object {
        uint32_t id;
@@ -118,7 +120,6 @@ struct drm_display_mode {
 
        char name[DRM_DISPLAY_MODE_LEN];
 
-       int connector_count;
        enum drm_mode_status status;
        int type;
 
@@ -238,13 +239,15 @@ struct drm_framebuffer {
        struct list_head head;
        struct drm_mode_object base;
        const struct drm_framebuffer_funcs *funcs;
-       unsigned int pitch;
+       unsigned int pitches[4];
+       unsigned int offsets[4];
        unsigned int width;
        unsigned int height;
        /* depth can be 15 or 16 */
        unsigned int depth;
        int bits_per_pixel;
        int flags;
+       uint32_t pixel_format; /* fourcc format */
        struct list_head filp_head;
        /* if you are using the helper */
        void *helper_private;
@@ -278,6 +281,7 @@ struct drm_crtc;
 struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
+struct drm_plane;
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
@@ -341,10 +345,21 @@ struct drm_crtc_funcs {
 
 /**
  * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
  * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
  * @x: x position on screen
  * @y: y position on screen
  * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
@@ -423,6 +438,13 @@ struct drm_connector_funcs {
        void (*force)(struct drm_connector *connector);
 };
 
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
 struct drm_encoder_funcs {
        void (*reset)(struct drm_encoder *encoder);
        void (*destroy)(struct drm_encoder *encoder);
@@ -435,6 +457,18 @@ struct drm_encoder_funcs {
 
 /**
  * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
  */
 struct drm_encoder {
        struct drm_device *dev;
@@ -470,14 +504,37 @@ enum drm_connector_force {
 
 /**
  * drm_connector - central DRM connector control structure
- * @crtc: CRTC this connector is currently connected to, NULL if none
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
- * @available_modes: modes available on this connector (from get_modes() + user)
- * @initial_x: initial x position for this connector
- * @initial_y: initial y position for this connector
- * @status: connector connected?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
  * @funcs: connector control functions
+ * @user_modes: user added mode list
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @property_ids: property tracking for this connector
+ * @property_values: value pointers or data for properties
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -498,7 +555,6 @@ struct drm_connector {
        bool doublescan_allowed;
        struct list_head modes; /* list of modes on this connector */
 
-       int initial_x, initial_y;
        enum drm_connector_status status;
 
        /* these are modes added by probing with DDC or the BIOS */
@@ -522,7 +578,6 @@ struct drm_connector {
        /* forced on connector */
        enum drm_connector_force force;
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-       uint32_t force_encoder_id;
        struct drm_encoder *encoder; /* currently active encoder */
 
        /* EDID bits */
@@ -536,7 +591,71 @@ struct drm_connector {
 };
 
 /**
- * struct drm_mode_set
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ */
+struct drm_plane_funcs {
+       int (*update_plane)(struct drm_plane *plane,
+                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h);
+       int (*disable_plane)(struct drm_plane *plane);
+       void (*destroy)(struct drm_plane *plane);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @gamma_size: size of gamma table
+ * @gamma_store: gamma correction table
+ * @enabled: enabled flag
+ * @funcs: helper functions
+ * @helper_private: storage for driver layer
+ */
+struct drm_plane {
+       struct drm_device *dev;
+       struct list_head head;
+
+       struct drm_mode_object base;
+
+       uint32_t possible_crtcs;
+       uint32_t *format_types;
+       uint32_t format_count;
+
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+
+       /* CRTC gamma size for reporting to userspace */
+       uint32_t gamma_size;
+       uint16_t *gamma_store;
+
+       bool enabled;
+
+       const struct drm_plane_funcs *funcs;
+       void *helper_private;
+};
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
  *
  * Represents a single crtc the connectors that it drives with what mode
  * and from which framebuffer it scans out from.
@@ -558,13 +677,33 @@ struct drm_mode_set {
 };
 
 /**
- * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
  */
 struct drm_mode_config_funcs {
-       struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
+       struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+                                            struct drm_file *file_priv,
+                                            struct drm_mode_fb_cmd2 *mode_cmd);
        void (*output_poll_changed)(struct drm_device *dev);
 };
 
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state.  But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
 struct drm_mode_group {
        uint32_t num_crtcs;
        uint32_t num_encoders;
@@ -576,7 +715,30 @@ struct drm_mode_group {
 
 /**
  * drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
  *
+ * Core mode resource tracking structure.  All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties.  Some
+ * global restrictions are also here, e.g. dimension restrictions.
  */
 struct drm_mode_config {
        struct mutex mutex; /* protects configuration (mode lists etc.) */
@@ -589,6 +751,8 @@ struct drm_mode_config {
        struct list_head connector_list;
        int num_encoder;
        struct list_head encoder_list;
+       int num_plane;
+       struct list_head plane_list;
 
        int num_crtc;
        struct list_head crtc_list;
@@ -641,6 +805,7 @@ struct drm_mode_config {
 #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
 #define obj_to_property(x) container_of(x, struct drm_property, base)
 #define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
 
 extern void drm_crtc_init(struct drm_device *dev,
@@ -660,6 +825,14 @@ extern void drm_encoder_init(struct drm_device *dev,
                             const struct drm_encoder_funcs *funcs,
                             int encoder_type);
 
+extern int drm_plane_init(struct drm_device *dev,
+                         struct drm_plane *plane,
+                         unsigned long possible_crtcs,
+                         const struct drm_plane_funcs *funcs,
+                         const uint32_t *formats, uint32_t format_count,
+                         bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
 extern char *drm_get_connector_name(struct drm_connector *connector);
@@ -753,17 +926,25 @@ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 /* IOCTLs */
 extern int drm_mode_getresources(struct drm_device *dev,
                                 void *data, struct drm_file *file_priv);
-
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
 extern int drm_mode_getcrtc(struct drm_device *dev,
                            void *data, struct drm_file *file_priv);
 extern int drm_mode_getconnector(struct drm_device *dev,
                              void *data, struct drm_file *file_priv);
 extern int drm_mode_setcrtc(struct drm_device *dev,
                            void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
 extern int drm_mode_cursor_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file_priv);
 extern int drm_mode_addfb(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
 extern int drm_mode_rmfb(struct drm_device *dev,
                         void *data, struct drm_file *file_priv);
 extern int drm_mode_getfb(struct drm_device *dev,
@@ -824,4 +1005,7 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
                                    void *data, struct drm_file *file_priv);
 extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+                                int *bpp);
 #endif /* __DRM_CRTC_H__ */
index 73b071203dcc8bf61d65a75bac74524ba579fbcb..37515d1afab31bb4ccca0e2391eac06fe8d2b553 100644 (file)
@@ -117,7 +117,7 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                         struct drm_mode_fb_cmd *mode_cmd);
+                                         struct drm_mode_fb_cmd2 *mode_cmd);
 
 static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
                                       const struct drm_crtc_helper_funcs *funcs)
@@ -144,4 +144,7 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+extern int drm_format_num_planes(uint32_t format);
+
 #endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644 (file)
index 0000000..bdf0152
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <linux/types.h>
+
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+                                ((__u32)(c) << 16) | ((__u32)(d) << 24))
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8          fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332      fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233      fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444    fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444    fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444    fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444    fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444    fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444    fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444    fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444    fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555    fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555    fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551    fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551    fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555    fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555    fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551    fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551    fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565      fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565      fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888      fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888      fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888    fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888    fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888    fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888    fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888    fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888    fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888    fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888    fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV                fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU                fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY                fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY                fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV                fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12                fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21                fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16                fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61                fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+
+/* 2 non contiguous plane YCbCr */
+#define DRM_FORMAT_NV12M       fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV12MT      fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410      fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410      fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411      fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411      fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420      fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420      fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422      fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422      fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444      fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444      fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/* 3 non contiguous plane YCbCr */
+#define DRM_FORMAT_YUV420M     fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+
+#endif /* DRM_FOURCC_H */
index ddd46db65b57257164f13d703186bb7da5e7bb3a..2a2acda8b437089222a6b1d1cad4aaed6d08ddbe 100644 (file)
@@ -120,11 +120,48 @@ struct drm_mode_crtc {
        struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_ENCODER_NONE   0
-#define DRM_MODE_ENCODER_DAC    1
-#define DRM_MODE_ENCODER_TMDS   2
-#define DRM_MODE_ENCODER_LVDS   3
-#define DRM_MODE_ENCODER_TVDAC  4
+#define DRM_MODE_PRESENT_TOP_FIELD     (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD  (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+       __u32 plane_id;
+       __u32 crtc_id;
+       __u32 fb_id; /* fb object contains surface format type */
+       __u32 flags; /* see above flags */
+
+       /* Signed dest location allows it to be partially off screen */
+       __s32 crtc_x, crtc_y;
+       __u32 crtc_w, crtc_h;
+
+       /* Source values are 16.16 fixed point */
+       __u32 src_x, src_y;
+       __u32 src_h, src_w;
+};
+
+struct drm_mode_get_plane {
+       __u32 plane_id;
+
+       __u32 crtc_id;
+       __u32 fb_id;
+
+       __u32 possible_crtcs;
+       __u32 gamma_size;
+
+       __u32 count_format_types;
+       __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+       __u64 plane_id_ptr;
+       __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE  0
+#define DRM_MODE_ENCODER_DAC   1
+#define DRM_MODE_ENCODER_TMDS  2
+#define DRM_MODE_ENCODER_LVDS  3
+#define DRM_MODE_ENCODER_TVDAC 4
 #define DRM_MODE_ENCODER_VIRTUAL 5
 
 struct drm_mode_get_encoder {
@@ -231,6 +268,33 @@ struct drm_mode_fb_cmd {
        __u32 handle;
 };
 
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+
+struct drm_mode_fb_cmd2 {
+       __u32 fb_id;
+       __u32 width, height;
+       __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+       __u32 flags; /* see above flags */
+
+       /*
+        * In case of planar formats, this ioctl allows up to 4
+        * buffer objects with offsets and pitches per plane.
+        * The pitch and offset order is dictated by the fourcc,
+        * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+        *
+        *   YUV 4:2:0 image with a plane of 8 bit Y samples
+        *   followed by an interleaved U/V plane containing
+        *   8 bit 2x2 subsampled colour difference samples.
+        *
+        * So it would consist of Y as offsets[0] and UV as
+        * offsets[1].  Note that offsets[0] will generally
+        * be 0.
+        */
+       __u32 handles[4];
+       __u32 pitches[4]; /* pitch for each plane */
+       __u32 offsets[4]; /* offset of each plane */
+};
+
 #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
 #define DRM_MODE_FB_DIRTY_FLAGS         0x03
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
deleted file mode 100644 (file)
index 08ecf83..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple memory MANager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_SMAN_H
-#define DRM_SMAN_H
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-
-/*
- * A class that is an abstration of a simple memory allocator.
- * The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
- * See the SiS implementation, which may use the SiS FB kernel module
- * for memory management.
- */
-
-struct drm_sman_mm {
-       /* private info. If allocated, needs to be destroyed by the destroy
-          function */
-       void *private;
-
-       /* Allocate a memory block with given size and alignment.
-          Return an opaque reference to the memory block */
-
-       void *(*allocate) (void *private, unsigned long size,
-                          unsigned alignment);
-
-       /* Free a memory block. "ref" is the opaque reference that we got from
-          the "alloc" function */
-
-       void (*free) (void *private, void *ref);
-
-       /* Free all resources associated with this allocator */
-
-       void (*destroy) (void *private);
-
-       /* Return a memory offset from the opaque reference returned from the
-          "alloc" function */
-
-       unsigned long (*offset) (void *private, void *ref);
-};
-
-struct drm_memblock_item {
-       struct list_head owner_list;
-       struct drm_hash_item user_hash;
-       void *mm_info;
-       struct drm_sman_mm *mm;
-       struct drm_sman *sman;
-};
-
-struct drm_sman {
-       struct drm_sman_mm *mm;
-       int num_managers;
-       struct drm_open_hash owner_hash_tab;
-       struct drm_open_hash user_hash_tab;
-       struct list_head owner_items;
-};
-
-/*
- * Take down a memory manager. This function should only be called after a
- * successful init and after a call to drm_sman_cleanup.
- */
-
-extern void drm_sman_takedown(struct drm_sman * sman);
-
-/*
- * Allocate structures for a manager.
- * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
- * user_order is the log2 of the number of buckets in the user hash table.
- *         set this to approximately log2 of the max number of memory regions
- *         that will be allocated for _all_ pools together.
- * owner_order is the log2 of the number of buckets in the owner hash table.
- *         set this to approximately log2 of
- *         the number of client file connections that will
- *         be using the manager.
- *
- */
-
-extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
-                        unsigned int user_order, unsigned int owner_order);
-
-/*
- * Initialize a drm_mm.c allocator. Should be called only once for each
- * manager unless a customized allogator is used.
- */
-
-extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
-                             unsigned long start, unsigned long size);
-
-/*
- * Initialize a customized allocator for one of the managers.
- * (See the SiS module). The object pointed to by "allocator" is copied,
- * so it can be destroyed after this call.
- */
-
-extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
-                               struct drm_sman_mm * allocator);
-
-/*
- * Allocate a memory block. Aligment is not implemented yet.
- */
-
-extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
-                                               unsigned int manager,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long owner);
-/*
- * Free a memory block identified by its user hash key.
- */
-
-extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
-
-/*
- * returns 1 iff there are no stale memory blocks associated with this owner.
- * Typically called to determine if we need to idle the hardware and call
- * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
- * resources associated with owner.
- */
-
-extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with this owner. Note that this
- * requires that the hardware is finished with all blocks, so the graphics engine
- * should be idled before this call is made. This function also frees
- * any resources associated with "owner" and should be called when owner
- * is not going to be referenced anymore.
- */
-
-extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with the memory manager.
- * See idling above.
- */
-
-extern void drm_sman_cleanup(struct drm_sman * sman);
-
-#endif
index 12050434d57a248909d695a80445e958dea81e32..5e120f1c5cd927ec154ef1bc5d59ae207f62d92e 100644 (file)
@@ -74,9 +74,16 @@ struct drm_exynos_gem_mmap {
        uint64_t mapped;
 };
 
+struct drm_exynos_plane_set_zpos {
+       __u32 plane_id;
+       __s32 zpos;
+};
+
 #define DRM_EXYNOS_GEM_CREATE          0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET      0x01
 #define DRM_EXYNOS_GEM_MMAP            0x02
+/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
+#define DRM_EXYNOS_PLANE_SET_ZPOS      0x06
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE            DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -87,6 +94,9 @@ struct drm_exynos_gem_mmap {
 #define DRM_IOCTL_EXYNOS_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
 
+#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS        DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
+
 /**
  * Platform Specific Structure for DRM based FIMD.
  *
@@ -102,4 +112,31 @@ struct exynos_drm_fimd_pdata {
        unsigned int                    bpp;
 };
 
+/**
+ * Platform Specific Structure for DRM based HDMI.
+ *
+ * @hdmi_dev: device point to specific hdmi driver.
+ * @mixer_dev: device point to specific mixer driver.
+ *
+ * this structure is used for common hdmi driver and each device object
+ * would be used to access specific device driver(hdmi or mixer driver)
+ */
+struct exynos_drm_common_hdmi_pd {
+       struct device *hdmi_dev;
+       struct device *mixer_dev;
+};
+
+/**
+ * Platform Specific Structure for DRM based HDMI core.
+ *
+ * @timing: default video mode for initializing
+ * @default_win: default window layer number to be used for UI.
+ * @bpp: default bit per pixel.
+ */
+struct exynos_drm_hdmi_pdata {
+       struct fb_videomode             timing;
+       unsigned int                    default_win;
+       unsigned int                    bpp;
+};
+
 #endif
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
new file mode 100644 (file)
index 0000000..1136867
--- /dev/null
@@ -0,0 +1,91 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRM_H_
+#define _PSB_DRM_H_
+
+/*
+ *     Manage the LUT for an output
+ */
+struct drm_psb_dpst_lut_arg {
+       uint8_t lut[256];
+       int output_id;
+};
+
+/*
+ *     Validate modes
+ */
+struct drm_psb_mode_operation_arg {
+       u32 obj_id;
+       u16 operation;
+       struct drm_mode_modeinfo mode;
+       u64 data;
+};
+
+/*
+ *     Query the stolen memory for smarter management of
+ *     memory by the server
+ */
+struct drm_psb_stolen_memory_arg {
+       u32 base;
+       u32 size;
+};
+
+struct drm_psb_get_pipe_from_crtc_id_arg {
+       /** ID of CRTC being requested **/
+       u32 crtc_id;
+       /** pipe of requested CRTC **/
+       u32 pipe;
+};
+
+struct drm_psb_gem_create {
+       __u64 size;
+       __u32 handle;
+       __u32 flags;
+#define GMA_GEM_CREATE_STOLEN          1       /* Stolen memory can be used */
+};
+
+struct drm_psb_gem_mmap {
+       __u32 handle;
+       __u32 pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 offset;
+};
+
+/* Controlling the kernel modesetting buffers */
+
+#define DRM_GMA_GEM_CREATE     0x00            /* Create a GEM object */
+#define DRM_GMA_GEM_MMAP       0x01            /* Map GEM memory */
+#define DRM_GMA_STOLEN_MEMORY  0x02            /* Report stolen memory */
+#define DRM_GMA_2D_OP          0x03            /* Will be merged later */
+#define DRM_GMA_GAMMA          0x04            /* Set gamma table */
+#define DRM_GMA_ADB            0x05            /* Get backlight */
+#define DRM_GMA_DPST_BL                0x06            /* Set backlight */
+#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1      /* CRTC to physical pipe# */
+#define DRM_GMA_MODE_OPERATION 0x07            /* Mode validation/DC set */
+#define        PSB_MODE_OPERATION_MODE_VALID   0x01
+
+
+#endif
index 28c0d114cb52dfaebd9756eec62187e3df56491f..924f6a454fed97d9b84dbbbbdfc32751b82759db 100644 (file)
@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_OVERLAY_PUT_IMAGE     0x27
 #define DRM_I915_OVERLAY_ATTRS 0x28
 #define DRM_I915_GEM_EXECBUFFER2       0x29
+#define DRM_I915_GET_SPRITE_COLORKEY   0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY   0x2b
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_MADVISE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
 #define DRM_IOCTL_I915_OVERLAY_ATTRS   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -291,6 +295,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_COHERENT_RINGS   13
 #define I915_PARAM_HAS_EXEC_CONSTANTS   14
 #define I915_PARAM_HAS_RELAXED_DELTA    15
+#define I915_PARAM_HAS_GEN7_SOL_RESET   16
 
 typedef struct drm_i915_getparam {
        int param;
@@ -653,6 +658,9 @@ struct drm_i915_gem_execbuffer2 {
        __u64 rsvd2;
 };
 
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET       (1<<8)
+
 struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        __u32 handle;
@@ -844,4 +852,36 @@ struct drm_intel_overlay_attrs {
        __u32 gamma5;
 };
 
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple.  Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent.  All other pixels will
+ * be displayed on top of the primary plane.  For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE         (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION  (1<<1)
+#define I915_SET_COLORKEY_SOURCE       (1<<2)
+struct drm_intel_sprite_colorkey {
+       __u32 plane_id;
+       __u32 min_value;
+       __u32 channel_mask;
+       __u32 max_value;
+       __u32 flags;
+};
+
 #endif                         /* _I915_DRM_H_ */
index be94be6d6f17afdd027b3cf61679c73651ac6f9e..b55da40953fda7ca9cc9b9823ebe21b4430f1f19 100644 (file)
@@ -509,6 +509,7 @@ typedef struct {
 #define DRM_RADEON_GEM_SET_TILING      0x28
 #define DRM_RADEON_GEM_GET_TILING      0x29
 #define DRM_RADEON_GEM_BUSY            0x2a
+#define DRM_RADEON_GEM_VA              0x2b
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -550,6 +551,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_SET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
 #define DRM_IOCTL_RADEON_GEM_GET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA                DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
 
 typedef struct drm_radeon_init {
        enum {
@@ -872,12 +874,39 @@ struct drm_radeon_gem_pwrite {
        uint64_t data_ptr;
 };
 
+#define RADEON_VA_MAP                  1
+#define RADEON_VA_UNMAP                        2
+
+#define RADEON_VA_RESULT_OK            0
+#define RADEON_VA_RESULT_ERROR         1
+#define RADEON_VA_RESULT_VA_EXIST      2
+
+#define RADEON_VM_PAGE_VALID           (1 << 0)
+#define RADEON_VM_PAGE_READABLE                (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE       (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM          (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED         (1 << 4)
+
+struct drm_radeon_gem_va {
+       uint32_t                handle;
+       uint32_t                operation;
+       uint32_t                vm_id;
+       uint32_t                flags;
+       uint64_t                offset;
+};
+
 #define RADEON_CHUNK_ID_RELOCS 0x01
 #define RADEON_CHUNK_ID_IB     0x02
 #define RADEON_CHUNK_ID_FLAGS  0x03
 
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM            0x02
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX          0
+#define RADEON_CS_RING_COMPUTE      1
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
 
 struct drm_radeon_cs_chunk {
        uint32_t                chunk_id;
@@ -885,6 +914,9 @@ struct drm_radeon_cs_chunk {
        uint64_t                chunk_data;
 };
 
+/* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_DONT_SYNC         0x01
+
 struct drm_radeon_cs_reloc {
        uint32_t                handle;
        uint32_t                read_domains;
@@ -916,6 +948,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_NUM_TILE_PIPES     0x0b /* tile pipes for r600+ */
 #define RADEON_INFO_FUSION_GART_WORKING        0x0c /* fusion writes to GTT were broken before this */
 #define RADEON_INFO_BACKEND_MAP                0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START           0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE     0x0f
 
 struct drm_radeon_info {
        uint32_t                request;
index 30f7b38274668c0fa766cad45bee6256425968f3..035b804dda6de514e629276cab0bee555d7110ed 100644 (file)
@@ -64,4 +64,8 @@ typedef struct {
        unsigned int offset, size;
 } drm_sis_fb_t;
 
+struct sis_file_private {
+       struct list_head obj_list;
+};
+
 #endif                         /* __SIS_DRM_H__ */
index 42e34698518643d346e1b23c6aa08640f93eabe4..974c8f801c39eb665b35de3f5a37d1632b607b88 100644 (file)
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
  * be mmapped by user space. Each of these bos occupy a slot in the
  * device address space, that can be used for normal vm operations.
  *
- * @ttm_bo_type_user: These are user-space memory areas that are made
- * available to the GPU by mapping the buffer pages into the GPU aperture
- * space. These buffers cannot be mmaped from the device address space.
- *
  * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
  * but they cannot be accessed from user-space. For kernel-only use.
  */
 
 enum ttm_bo_type {
        ttm_bo_type_device,
-       ttm_bo_type_user,
        ttm_bo_type_kernel
 };
 
@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
  */
-
 extern int
 ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
 /**
  * ttm_bo_synccpu_write_release:
  *
@@ -446,6 +441,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
  */
 extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
 
+/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in byte.
+ * @struct_size: size of the structure holding buffer object datas
+ *
+ * Returns size to account for a buffer object
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+                      unsigned long bo_size,
+                      unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+                          unsigned long bo_size,
+                          unsigned struct_size);
+
 /**
  * ttm_bo_init
  *
@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
                        struct file *persistent_swap_storage,
                        size_t acc_size,
                        void (*destroy) (struct ttm_buffer_object *));
+
 /**
  * ttm_bo_synccpu_object_init
  *
index 94eb1434316e5314237a1bc5dafb54ab550b9aa1..d43e892307ff63a286c1a142a876cf8bb4d7985d 100644 (file)
 struct ttm_backend;
 
 struct ttm_backend_func {
-       /**
-        * struct ttm_backend_func member populate
-        *
-        * @backend: Pointer to a struct ttm_backend.
-        * @num_pages: Number of pages to populate.
-        * @pages: Array of pointers to ttm pages.
-        * @dummy_read_page: Page to be used instead of NULL pages in the
-        * array @pages.
-        * @dma_addrs: Array of DMA (bus) address of the ttm pages.
-        *
-        * Populate the backend with ttm pages. Depending on the backend,
-        * it may or may not copy the @pages array.
-        */
-       int (*populate) (struct ttm_backend *backend,
-                        unsigned long num_pages, struct page **pages,
-                        struct page *dummy_read_page,
-                        dma_addr_t *dma_addrs);
-       /**
-        * struct ttm_backend_func member clear
-        *
-        * @backend: Pointer to a struct ttm_backend.
-        *
-        * This is an "unpopulate" function. Release all resources
-        * allocated with populate.
-        */
-       void (*clear) (struct ttm_backend *backend);
-
        /**
         * struct ttm_backend_func member bind
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
@@ -80,46 +53,29 @@ struct ttm_backend_func {
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
-       int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+       int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
        /**
         * struct ttm_backend_func member unbind
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
-       int (*unbind) (struct ttm_backend *backend);
+       int (*unbind) (struct ttm_tt *ttm);
 
        /**
         * struct ttm_backend_func member destroy
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         *
-        * Destroy the backend.
+        * Destroy the backend. This will be call back from ttm_tt_destroy so
+        * don't call ttm_tt_destroy from the callback or infinite loop.
         */
-       void (*destroy) (struct ttm_backend *backend);
-};
-
-/**
- * struct ttm_backend
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @flags: For driver use.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- *
- */
-
-struct ttm_backend {
-       struct ttm_bo_device *bdev;
-       uint32_t flags;
-       struct ttm_backend_func *func;
+       void (*destroy) (struct ttm_tt *ttm);
 };
 
-#define TTM_PAGE_FLAG_USER            (1 << 1)
-#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
@@ -135,23 +91,18 @@ enum ttm_caching_state {
 /**
  * struct ttm_tt
  *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
  * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
  * pointer.
  * @pages: Array of pages backing the data.
- * @first_himem_page: Himem pages are put last in the page array, which
- * enables us to run caching attribute changes on only the first part
- * of the page array containing lomem pages. This is the index of the
- * first himem page.
- * @last_lomem_page: Index of the last lomem page in the page array.
  * @num_pages: Number of pages in the page array.
  * @bdev: Pointer to the current struct ttm_bo_device.
  * @be: Pointer to the ttm backend.
- * @tsk: The task for user ttm.
- * @start: virtual address for user ttm.
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -159,16 +110,14 @@ enum ttm_caching_state {
  */
 
 struct ttm_tt {
+       struct ttm_bo_device *bdev;
+       struct ttm_backend_func *func;
        struct page *dummy_read_page;
        struct page **pages;
-       long first_himem_page;
-       long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_global *glob;
        struct ttm_backend *be;
-       struct task_struct *tsk;
-       unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
@@ -176,7 +125,23 @@ struct ttm_tt {
                tt_unbound,
                tt_unpopulated,
        } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+       struct ttm_tt ttm;
        dma_addr_t *dma_address;
+       struct list_head pages_list;
 };
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
 
 struct ttm_bo_driver {
        /**
-        * struct ttm_bo_driver member create_ttm_backend_entry
+        * ttm_tt_create
         *
-        * @bdev: The buffer object device.
+        * @bdev: pointer to a struct ttm_bo_device:
+        * @size: Size of the data needed backing.
+        * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+        * @dummy_read_page: See struct ttm_bo_device.
         *
-        * Create a driver specific struct ttm_backend.
+        * Create a struct ttm_tt to back data with system memory pages.
+        * No pages are actually allocated.
+        * Returns:
+        * NULL: Out of memory.
         */
+       struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+                                       unsigned long size,
+                                       uint32_t page_flags,
+                                       struct page *dummy_read_page);
 
-       struct ttm_backend *(*create_ttm_backend_entry)
-        (struct ttm_bo_device *bdev);
+       /**
+        * ttm_tt_populate
+        *
+        * @ttm: The struct ttm_tt to contain the backing pages.
+        *
+        * Allocate all backing pages
+        * Returns:
+        * -ENOMEM: Out of memory.
+        */
+       int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+       /**
+        * ttm_tt_unpopulate
+        *
+        * @ttm: The struct ttm_tt to contain the backing pages.
+        *
+        * Free all backing page
+        */
+       void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 
        /**
         * struct ttm_bo_driver member invalidate_caches
@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
  * @shrink: A shrink callback object used for buffer object swap.
- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
- * used by a buffer object. This is excluding page arrays and backing pages.
- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
  * @device_list_mutex: Mutex protecting the device list.
  * This mutex is held while traversing the device list for pm options.
  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
@@ -497,8 +486,6 @@ struct ttm_bo_global {
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
-       size_t ttm_bo_extra_size;
-       size_t ttm_bo_size;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;
 
@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 }
 
 /**
- * ttm_tt_create
+ * ttm_tt_init
  *
+ * @ttm: The struct ttm_tt.
  * @bdev: pointer to a struct ttm_bo_device:
  * @size: Size of the data needed backing.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
  * Returns:
  * NULL: Out of memory.
  */
-extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
-                                   unsigned long size,
-                                   uint32_t page_flags,
-                                   struct page *dummy_read_page);
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+                       unsigned long size, uint32_t page_flags,
+                       struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+                          unsigned long size, uint32_t page_flags,
+                          struct page *dummy_read_page);
 
 /**
- * ttm_tt_set_user:
+ * ttm_tt_fini
  *
- * @ttm: The struct ttm_tt to populate.
- * @tsk: A struct task_struct for which @start is a valid user-space address.
- * @start: A valid user-space address.
- * @num_pages: Size in pages of the user memory area.
+ * @ttm: the ttm_tt structure.
  *
- * Populate a struct ttm_tt with a user-space memory area after first pinning
- * the pages backing it.
- * Returns:
- * !0: Error.
+ * Free memory of ttm_tt structure
  */
-
-extern int ttm_tt_set_user(struct ttm_tt *ttm,
-                          struct task_struct *tsk,
-                          unsigned long start, unsigned long num_pages);
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_bind:
@@ -645,21 +627,12 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
  */
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
-/**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
 /**
  * ttm_ttm_destroy:
  *
  * @ttm: The struct ttm_tt.
  *
- * Unbind, unpopulate and destroy a struct ttm_tt.
+ * Unbind, unpopulate and destroy common struct ttm_tt.
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
 extern void ttm_tt_unbind(struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
  *
  * @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
- *
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
  *
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swap out ttm_tt.
  */
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
  * ttm_tt_cache_flush:
@@ -1046,17 +1013,25 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 #include <linux/agp_backend.h>
 
 /**
- * ttm_agp_backend_init
+ * ttm_agp_tt_create
  *
  * @bdev: Pointer to a struct ttm_bo_device.
  * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
  *
  * Create a TTM backend that uses the indicated AGP bridge as an aperture
  * for TT memory. This function uses the linux agpgart interface to
  * bind and unbind memory backing a ttm_tt.
  */
-extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-                                               struct agp_bridge_data *bridge);
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+                                       struct agp_bridge_data *bridge,
+                                       unsigned long size, uint32_t page_flags,
+                                       struct page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
 
 #endif
index 129de12353f1914161273e1b7338b24541cd1fa7..5fe27400d176cb16f14adbe6b7d7a584e167c0be 100644 (file)
 #include "ttm_memory.h"
 
 /**
- * Get count number of pages from pool to pages list.
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
  *
- * @pages: head of empty linked list where pages are filled.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Add backing pages to all of @ttm
  */
-int ttm_get_pages(struct list_head *pages,
-                 int flags,
-                 enum ttm_caching_state cstate,
-                 unsigned count,
-                 dma_addr_t *dma_address);
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
 /**
- * Put linked list of pages to pool.
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt which to free backing pages.
  *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Free all pages of @ttm
  */
-void ttm_put_pages(struct list_head *pages,
-                  unsigned page_count,
-                  int flags,
-                  enum ttm_caching_state cstate,
-                  dma_addr_t *dma_address);
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+
+
+#ifdef CONFIG_SWIOTLB
 /**
  * Initialize pool allocator.
  */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
 /**
  * Free pool allocator.
  */
-void ttm_page_alloc_fini(void);
+void ttm_dma_page_alloc_fini(void);
 
 /**
  * Output the state of pools to debugfs file
  */
-extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+                                         unsigned max_pages)
+{
+       return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       return 0;
+}
+#endif
+
 #endif
index fd11a5bd892d6459fb9a96866d2c8fe15fd9884f..79b3b6e0f6b30011d2b733e61466ee64d62c4c00 100644 (file)
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
        drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
 
+struct via_file_private {
+       struct list_head obj_list;
+};
+
 #endif                         /* _VIA_DRM_H_ */
index 6001b4da39ddc9ebd52e0f7691268abbe8ebb69d..627a3a42e4d8e3ba015ad32a6bf01f779c356943 100644 (file)
@@ -302,6 +302,10 @@ extern bool osc_sb_apei_support_acked;
                                OSC_PCI_EXPRESS_PME_CONTROL |           \
                                OSC_PCI_EXPRESS_AER_CONTROL |           \
                                OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
+
+#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |    \
+                               OSC_SHPC_NATIVE_HP_CONTROL)
+
 extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
                                             u32 *mask, u32 req);
 extern void acpi_early_init(void);
index be3d9a77d6edd66cbf0208bf9145490a90981129..73a25005d88aa7cc76f7e8049b9697be18955c39 100644 (file)
@@ -23,6 +23,8 @@ struct ata_port_info;
 struct ahci_platform_data {
        int (*init)(struct device *dev, void __iomem *addr);
        void (*exit)(struct device *dev);
+       int (*suspend)(struct device *dev);
+       int (*resume)(struct device *dev);
        const struct ata_port_info *ata_port_info;
        unsigned int force_port_map;
        unsigned int mask_port_map;
index a6863a2dec1f6883b6be0b3fd68ca7f6d8982d83..ef00610837d4f9e1b556028c151df0118b89677c 100644 (file)
 #ifndef _ASM_X86_AMD_IOMMU_H
 #define _ASM_X86_AMD_IOMMU_H
 
-#include <linux/irqreturn.h>
+#include <linux/types.h>
 
 #ifdef CONFIG_AMD_IOMMU
 
+struct task_struct;
+struct pci_dev;
+
 extern int amd_iommu_detect(void);
 
+
+/**
+ * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
+ *                                    in the IOMMUv2 driver
+ * @pdev: The PCI device the workaround is necessary for
+ * @erratum: The erratum workaround to enable
+ *
+ * The function needs to be called before amd_iommu_init_device().
+ * Possible values for the erratum number are for now:
+ * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
+ *                                     is enabled
+ * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
+ *                                      requests to one
+ */
+#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET               0
+#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE              1
+
+extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
+
+/**
+ * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
+ * @pdev: The PCI device to initialize
+ * @pasids: Number of PASIDs to support for this device
+ *
+ * This function does all setup for the device pdev so that it can be
+ * used with IOMMUv2.
+ * Returns 0 on success or negative value on error.
+ */
+extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
+
+/**
+ * amd_iommu_free_device() - Free all IOMMUv2 related device resources
+ *                          and disable IOMMUv2 usage for this device
+ * @pdev: The PCI device to disable IOMMUv2 usage for'
+ */
+extern void amd_iommu_free_device(struct pci_dev *pdev);
+
+/**
+ * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
+ * @pdev: The PCI device to bind the task to
+ * @pasid: The PASID on the device the task should be bound to
+ * @task: the task to bind
+ *
+ * The function returns 0 on success or a negative value on error.
+ */
+extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+                               struct task_struct *task);
+
+/**
+ * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
+ *                           a device
+ * @pdev: The device of the PASID
+ * @pasid: The PASID to unbind
+ *
+ * When this function returns the device is no longer using the PASID
+ * and the PASID is no longer bound to its task.
+ */
+extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
+
+/**
+ * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
+ *                                 PRI requests
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ *
+ * The IOMMUv2 driver invokes this call-back when it is unable to
+ * successfully handle a PRI request. The device driver can then decide
+ * which PRI response the device should see. Possible return values for
+ * the call-back are:
+ *
+ * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_FAIL    - Send Failure back to the device,
+ *                                  the device is required to disable
+ *                                  PRI when it receives this response
+ *
+ * The function returns 0 on success or negative value on error.
+ */
+#define AMD_IOMMU_INV_PRI_RSP_SUCCESS  0
+#define AMD_IOMMU_INV_PRI_RSP_INVALID  1
+#define AMD_IOMMU_INV_PRI_RSP_FAIL     2
+
+typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
+                                       int pasid,
+                                       unsigned long address,
+                                       u16);
+
+extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+                                       amd_iommu_invalid_ppr_cb cb);
+
+/**
+ * amd_iommu_device_info() - Get information about IOMMUv2 support of a
+ *                          PCI device
+ * @pdev: PCI device to query information from
+ * @info: A pointer to an amd_iommu_device_info structure which will contain
+ *       the information about the PCI device
+ *
+ * Returns 0 on success, negative value on error
+ */
+
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP     0x1    /* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP     0x2    /* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP   0x4    /* PASID context supported */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP    0x8    /* Device may request execution
+                                                   on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP   0x10    /* Device may request
+                                                   super-user privileges */
+
+struct amd_iommu_device_info {
+       int max_pasids;
+       u32 flags;
+};
+
+extern int amd_iommu_device_info(struct pci_dev *pdev,
+                                struct amd_iommu_device_info *info);
+
+/**
+ * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
+ *                                    a pasid context. This call-back is
+ *                                    invoked when the IOMMUv2 driver needs to
+ *                                    invalidate a PASID context, for example
+ *                                    because the task that is bound to that
+ *                                    context is about to exit.
+ *
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ */
+
+typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+
+extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+                                          amd_iommu_invalidate_ctx cb);
+
 #else
 
 static inline int amd_iommu_detect(void) { return -ENODEV; }
index dfadc96e9d63851c334885f0ec7761c8d6cfe3ff..2f4079175afb81f80f641e28da9b1145b5415bae 100644 (file)
@@ -29,6 +29,7 @@
    the kernel context */
 #define __cold                 __attribute__((__cold__))
 
+#define __linktime_error(message) __attribute__((__error__(message)))
 
 #if __GNUC_MINOR__ >= 5
 /*
index 320d6c94ff848d5db94fb1fd76576501a88e9a3a..4a243546d142b3e0264b115332bf73e354aa0fdf 100644 (file)
@@ -293,7 +293,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 #ifndef __compiletime_error
 # define __compiletime_error(message)
 #endif
-
+#ifndef __linktime_error
+# define __linktime_error(message)
+#endif
 /*
  * Prevent the compiler from merging or refetching accesses.  The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
index d4e02f5353a080ae03bf2233eea266dcad5c36ba..6c7f6e9546c7fde54a88228ebc52144e6bd27692 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -74,15 +74,76 @@ struct dlm_lksb {
 
 #ifdef __KERNEL__
 
+struct dlm_slot {
+       int nodeid; /* 1 to MAX_INT */
+       int slot;   /* 1 to MAX_INT */
+};
+
+/*
+ * recover_prep: called before the dlm begins lock recovery.
+ *   Notfies lockspace user that locks from failed members will be granted.
+ * recover_slot: called after recover_prep and before recover_done.
+ *   Identifies a failed lockspace member.
+ * recover_done: called after the dlm completes lock recovery.
+ *   Identifies lockspace members and lockspace generation number.
+ */
+
+struct dlm_lockspace_ops {
+       void (*recover_prep) (void *ops_arg);
+       void (*recover_slot) (void *ops_arg, struct dlm_slot *slot);
+       void (*recover_done) (void *ops_arg, struct dlm_slot *slots,
+                             int num_slots, int our_slot, uint32_t generation);
+};
+
 /*
  * dlm_new_lockspace
  *
- * Starts a lockspace with the given name.  If the named lockspace exists in
- * the cluster, the calling node joins it.
+ * Create/join a lockspace.
+ *
+ * name: lockspace name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ *   including terminating null).
+ *
+ * cluster: cluster name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ *   including terminating null).  Optional.  When cluster is null, it
+ *   is not used.  When set, dlm_new_lockspace() returns -EBADR if cluster
+ *   is not equal to the dlm cluster name.
+ *
+ * flags:
+ * DLM_LSFL_NODIR
+ *   The dlm should not use a resource directory, but statically assign
+ *   resource mastery to nodes based on the name hash that is otherwise
+ *   used to select the directory node.  Must be the same on all nodes.
+ * DLM_LSFL_TIMEWARN
+ *   The dlm should emit netlink messages if locks have been waiting
+ *   for a configurable amount of time.  (Unused.)
+ * DLM_LSFL_FS
+ *   The lockspace user is in the kernel (i.e. filesystem).  Enables
+ *   direct bast/cast callbacks.
+ * DLM_LSFL_NEWEXCL
+ *   dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ *
+ * lvblen: length of lvb in bytes.  Must be multiple of 8.
+ *   dlm_new_lockspace() returns an error if this does not match
+ *   what other nodes are using.
+ *
+ * ops: callbacks that indicate lockspace recovery points so the
+ *   caller can coordinate its recovery and know lockspace members.
+ *   This is only used by the initial dlm_new_lockspace() call.
+ *   Optional.
+ *
+ * ops_arg: arg for ops callbacks.
+ *
+ * ops_result: tells caller if the ops callbacks (if provided) will
+ *   be used or not.  0: will be used, -EXXX will not be used.
+ *   -EOPNOTSUPP: the dlm does not have recovery_callbacks enabled.
+ *
+ * lockspace: handle for dlm functions
  */
 
-int dlm_new_lockspace(const char *name, int namelen,
-                     dlm_lockspace_t **lockspace, uint32_t flags, int lvblen);
+int dlm_new_lockspace(const char *name, const char *cluster,
+                     uint32_t flags, int lvblen,
+                     const struct dlm_lockspace_ops *ops, void *ops_arg,
+                     int *ops_result, dlm_lockspace_t **lockspace);
 
 /*
  * dlm_release_lockspace
index 18bea78fe47b3e5ce97f98bac45fd6e1e6acd785..8e2b7bac437869d8058427d513115e457a57752d 100644 (file)
@@ -33,6 +33,7 @@
 #define EM_H8_300      46      /* Renesas H8/300,300H,H8S */
 #define EM_MN10300     89      /* Panasonic/MEI MN10300, AM33 */
 #define EM_BLACKFIN     106     /* ADI Blackfin Processor */
+#define EM_TI_C6000    140     /* TI C6X DSPs */
 #define EM_FRV         0x5441  /* Fujitsu FR-V */
 #define EM_AVR32       0x18ad  /* Atmel AVR32 */
 
index 3a76faf6a3ee82cd20f82d36208c6f50c8f29685..581e74b7df95e3d063bbbcaece35aafe60fad774 100644 (file)
@@ -36,6 +36,7 @@ struct vm_area_struct;
 #endif
 #define ___GFP_NO_KSWAPD       0x400000u
 #define ___GFP_OTHER_NODE      0x800000u
+#define ___GFP_WRITE           0x1000000u
 
 /*
  * GFP bitmasks..
@@ -85,6 +86,7 @@ struct vm_area_struct;
 
 #define __GFP_NO_KSWAPD        ((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
+#define __GFP_WRITE    ((__force gfp_t)___GFP_WRITE)   /* Allocator intends to dirty page */
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
@@ -92,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 24    /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 25    /* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -313,7 +315,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
 {
-       VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+       VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
 
        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
@@ -358,6 +360,7 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, int cold);
+extern void free_hot_cold_page_list(struct list_head *list, int cold);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
@@ -367,9 +370,25 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+/*
+ * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
+ * GFP flags are used before interrupts are enabled. Once interrupts are
+ * enabled, it is set to __GFP_BITS_MASK while the system is running. During
+ * hibernation, it is used by PM to avoid I/O during memory allocation while
+ * devices are suspended.
+ */
 extern gfp_t gfp_allowed_mask;
 
 extern void pm_restrict_gfp_mask(void);
 extern void pm_restore_gfp_mask(void);
 
+#ifdef CONFIG_PM_SLEEP
+extern bool pm_suspended_storage(void);
+#else
+static inline bool pm_suspended_storage(void)
+{
+       return false;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 #endif /* __LINUX_GFP_H */
index c235e4e8767c5468bdb3057d6513b386d99dc576..3a95da60fd3e5cda95617fffc824168437373d95 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/workqueue.h>
 #include <linux/input.h>
 #include <linux/semaphore.h>
+#include <linux/power_supply.h>
 
 /*
  * We parse each description item into this structure. Short items data
@@ -190,6 +191,7 @@ struct hid_item {
 #define HID_UP_UNDEFINED       0x00000000
 #define HID_UP_GENDESK         0x00010000
 #define HID_UP_SIMULATION      0x00020000
+#define HID_UP_GENDEVCTRLS     0x00060000
 #define HID_UP_KEYBOARD                0x00070000
 #define HID_UP_LED             0x00080000
 #define HID_UP_BUTTON          0x00090000
@@ -239,6 +241,8 @@ struct hid_item {
 #define HID_GD_RIGHT           0x00010092
 #define HID_GD_LEFT            0x00010093
 
+#define HID_DC_BATTERYSTRENGTH 0x00060020
+
 #define HID_DG_DIGITIZER       0x000d0001
 #define HID_DG_PEN             0x000d0002
 #define HID_DG_LIGHTPEN                0x000d0003
@@ -482,6 +486,19 @@ struct hid_device {                                                        /* device report descriptor */
        struct hid_driver *driver;
        struct hid_ll_driver *ll_driver;
 
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+       /*
+        * Power supply information for HID devices which report
+        * battery strength. power_supply is registered iff
+        * battery.name is non-NULL.
+        */
+       struct power_supply battery;
+       __s32 battery_min;
+       __s32 battery_max;
+       __s32 battery_report_type;
+       __s32 battery_report_id;
+#endif
+
        unsigned int status;                                            /* see STAT flags above */
        unsigned claimed;                                               /* Claimed by hidinput, hiddev? */
        unsigned quirks;                                                /* Various quirks the device can pull on us */
@@ -712,6 +729,8 @@ extern void hidinput_disconnect(struct hid_device *);
 int hid_set_field(struct hid_field *, unsigned, __s32);
 int hid_input_report(struct hid_device *, int type, u8 *, int, int);
 int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+unsigned int hidinput_count_leds(struct hid_device *hid);
 void hid_output_report(struct hid_report *report, __u8 *data);
 struct hid_device *hid_allocate_device(void);
 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
@@ -719,6 +738,8 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
 int hid_check_keys_pressed(struct hid_device *hid);
 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
 void hid_disconnect(struct hid_device *hid);
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+                                        const struct hid_device_id *id);
 
 /**
  * hid_map_usage - map usage input bits
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
new file mode 100644 (file)
index 0000000..75d4be7
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on auo_touch.h from Dell Streak kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUO_PIXCIR_TS_H__
+#define __AUO_PIXCIR_TS_H__
+
+/*
+ * Interrupt modes:
+ * periodical:         interrupt is asserted periodicaly
+ * compare coordinates:        interrupt is asserted when coordinates change
+ * indicate touch:     interrupt is asserted during touch
+ */
+#define AUO_PIXCIR_INT_PERIODICAL      0x00
+#define AUO_PIXCIR_INT_COMP_COORD      0x01
+#define AUO_PIXCIR_INT_TOUCH_IND       0x02
+
+/*
+ * @gpio_int           interrupt gpio
+ * @int_setting                one of AUO_PIXCIR_INT_*
+ * @init_hw            hardwarespecific init
+ * @exit_hw            hardwarespecific shutdown
+ * @x_max              x-resolution
+ * @y_max              y-resolution
+ */
+struct auo_pixcir_ts_platdata {
+       int gpio_int;
+
+       int int_setting;
+
+       void (*init_hw)(struct i2c_client *);
+       void (*exit_hw)(struct i2c_client *);
+
+       unsigned int x_max;
+       unsigned int y_max;
+};
+
+#endif
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
new file mode 100644 (file)
index 0000000..aad2fd4
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _GP2AP002A00F_H_
+#define _GP2AP002A00F_H_
+
+#include <linux/i2c.h>
+
+#define GP2A_I2C_NAME "gp2ap002a00f"
+
+/**
+ * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
+ * @vout_gpio: The gpio connected to the object detected pin (VOUT)
+ * @wakeup: Set to true if the proximity can wake the device from suspend
+ * @hw_setup: Callback for setting up hardware such as gpios and vregs
+ * @hw_shutdown: Callback for properly shutting down hardware
+ */
+struct gp2a_platform_data {
+       int vout_gpio;
+       bool wakeup;
+       int (*hw_setup)(struct i2c_client *client);
+       int (*hw_shutdown)(struct i2c_client *client);
+};
+
+#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
new file mode 100644 (file)
index 0000000..c1cc52d
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef _INPUT_GPIO_TILT_H
+#define _INPUT_GPIO_TILT_H
+
+/**
+ * struct gpio_tilt_axis - Axis used by the tilt switch
+ * @axis:              Constant describing the axis, e.g. ABS_X
+ * @min:               minimum value for abs_param
+ * @max:               maximum value for abs_param
+ * @fuzz:              fuzz value for abs_param
+ * @flat:              flat value for abs_param
+ */
+struct gpio_tilt_axis {
+       int axis;
+       int min;
+       int max;
+       int fuzz;
+       int flat;
+};
+
+/**
+ * struct gpio_tilt_state - state description
+ * @gpios:             bitfield of gpio target-states for the value
+ * @axes:              array containing the axes settings for the gpio state
+ *                     The array indizes must correspond to the axes defined
+ *                     in platform_data
+ *
+ * This structure describes a supported axis settings
+ * and the necessary gpio-state which represent it.
+ *
+ * The n-th bit in the bitfield describes the state of the n-th GPIO
+ * from the gpios-array defined in gpio_regulator_config below.
+ */
+struct gpio_tilt_state {
+       int gpios;
+       int *axes;
+};
+
+/**
+ * struct gpio_tilt_platform_data
+ * @gpios:             Array containing the gpios determining the tilt state
+ * @nr_gpios:          Number of gpios
+ * @axes:              Array of gpio_tilt_axis descriptions
+ * @nr_axes:           Number of axes
+ * @states:            Array of gpio_tilt_state entries describing
+ *                     the gpio state for specific tilts
+ * @nr_states:         Number of states available
+ * @debounce_interval: debounce ticks interval in msecs
+ * @poll_interval:     polling interval in msecs - for polling driver only
+ * @enable:            callback to enable the tilt switch
+ * @disable:           callback to disable the tilt switch
+ *
+ * This structure contains gpio-tilt-switch configuration
+ * information that must be passed by platform code to the
+ * gpio-tilt input driver.
+ */
+struct gpio_tilt_platform_data {
+       struct gpio *gpios;
+       int nr_gpios;
+
+       struct gpio_tilt_axis *axes;
+       int nr_axes;
+
+       struct gpio_tilt_state *states;
+       int nr_states;
+
+       int debounce_interval;
+
+       unsigned int poll_interval;
+       int (*enable)(struct device *dev);
+       void (*disable)(struct device *dev);
+};
+
+#endif
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
new file mode 100644 (file)
index 0000000..7163d91
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef        _PIXCIR_I2C_TS_H
+#define        _PIXCIR_I2C_TS_H
+
+struct pixcir_ts_platform_data {
+       int (*attb_read_val)(void);
+       int x_max;
+       int y_max;
+};
+
+#endif
diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h
new file mode 100644 (file)
index 0000000..f25619b
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Samsung Keypad platform data definitions
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SAMSUNG_KEYPAD_H
+#define __SAMSUNG_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+#define SAMSUNG_MAX_ROWS       8
+#define SAMSUNG_MAX_COLS       8
+
+/**
+ * struct samsung_keypad_platdata - Platform device data for Samsung Keypad.
+ * @keymap_data: pointer to &matrix_keymap_data.
+ * @rows: number of keypad row supported.
+ * @cols: number of keypad col supported.
+ * @no_autorepeat: disable key autorepeat.
+ * @wakeup: controls whether the device should be set up as wakeup source.
+ * @cfg_gpio: configure the GPIO.
+ *
+ * Initialisation data specific to either the machine or the platform
+ * for the device driver to use or call-back when configuring gpio.
+ */
+struct samsung_keypad_platdata {
+       const struct matrix_keymap_data *keymap_data;
+       unsigned int rows;
+       unsigned int cols;
+       bool no_autorepeat;
+       bool wakeup;
+
+       void (*cfg_gpio)(unsigned int rows, unsigned int cols);
+};
+
+#endif /* __SAMSUNG_KEYPAD_H */
diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h
new file mode 100644 (file)
index 0000000..e71a85d
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * TCA8418 keypad platform support
+ *
+ * Copyright (C) 2011 Fuel7, Inc.  All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#ifndef _TCA8418_KEYPAD_H
+#define _TCA8418_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#define TCA8418_I2C_ADDR       0x34
+#define        TCA8418_NAME            "tca8418_keypad"
+
+struct tca8418_keypad_platform_data {
+       const struct matrix_keymap_data *keymap_data;
+       unsigned rows;
+       unsigned cols;
+       bool rep;
+       bool irq_is_gpio;
+};
+
+#endif
index 432acc4c054df1134dddb9be501de5c27d84d926..d937580417ba668d343b30b1741d59139f7924b9 100644 (file)
@@ -48,19 +48,34 @@ struct iommu_domain {
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @domain_has_cap: domain capabilities query
+ * @commit: commit iommu domain
+ * @pgsize_bitmap: bitmap of supported page sizes
+ */
 struct iommu_ops {
        int (*domain_init)(struct iommu_domain *domain);
        void (*domain_destroy)(struct iommu_domain *domain);
        int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
        void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
        int (*map)(struct iommu_domain *domain, unsigned long iova,
-                  phys_addr_t paddr, int gfp_order, int prot);
-       int (*unmap)(struct iommu_domain *domain, unsigned long iova,
-                    int gfp_order);
+                  phys_addr_t paddr, size_t size, int prot);
+       size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+                    size_t size);
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
                                    unsigned long iova);
        int (*domain_has_cap)(struct iommu_domain *domain,
                              unsigned long cap);
+       int (*device_group)(struct device *dev, unsigned int *groupid);
+       unsigned long pgsize_bitmap;
 };
 
 extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
@@ -72,15 +87,16 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 extern void iommu_detach_device(struct iommu_domain *domain,
                                struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-                    phys_addr_t paddr, int gfp_order, int prot);
-extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                      int gfp_order);
+                    phys_addr_t paddr, size_t size, int prot);
+extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+                      size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                                      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
                                unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler);
+extern int iommu_device_group(struct device *dev, unsigned int *groupid);
 
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -179,6 +195,11 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
+static inline int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+       return -ENODEV;
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
index 2092ea21e469eeeaa415a885ead161fed0ac7d38..5557baefed60b5f5bb3dd0984bbbe7f2e0a4c8fc 100644 (file)
@@ -1151,6 +1151,7 @@ extern int        jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
 extern int     jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t);
 extern void    jbd2_journal_clear_revoke(journal_t *);
 extern void    jbd2_journal_switch_revoke_table(journal_t *journal);
+extern void    jbd2_clear_buffer_revoked_flags(journal_t *journal);
 
 /*
  * The log thread user interface:
index e8b1597b5cf2592b85157674b1d1fcbd555d309a..f48e8a52854427466203d28142293914d77c1647 100644 (file)
@@ -665,6 +665,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #define BUILD_BUG_ON_ZERO(e) (0)
 #define BUILD_BUG_ON_NULL(e) ((void*)0)
 #define BUILD_BUG_ON(condition)
+#define BUILD_BUG() (0)
 #else /* __CHECKER__ */
 
 /* Force a compilation error if a constant expression is not a power of 2 */
@@ -703,6 +704,21 @@ extern int __build_bug_on_failed;
                if (condition) __build_bug_on_failed = 1;       \
        } while(0)
 #endif
+
+/**
+ * BUILD_BUG - break compile if used.
+ *
+ * If you have some code that you expect the compiler to eliminate at
+ * build time, you should use BUILD_BUG to detect if it is
+ * unexpectedly used.
+ */
+#define BUILD_BUG()                                            \
+       do {                                                    \
+               extern void __build_bug_failed(void)            \
+                       __linktime_error("BUILD_BUG failed");   \
+               __build_bug_failed();                           \
+       } while (0)
+
 #endif /* __CHECKER__ */
 
 /* Trap pasters of __FUNCTION__ at compile-time */
index d5262319997806bb6f1fdfcfb50b6cef37428b0e..900c76337e8f387b1dcf183260e43c1625536500 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
@@ -50,6 +51,9 @@
 #define KVM_REQ_APF_HALT          12
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
+#define KVM_REQ_IMMEDIATE_EXIT    15
+#define KVM_REQ_PMU               16
+#define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID    0
 
@@ -179,6 +183,7 @@ struct kvm_memory_slot {
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
+       unsigned long nr_dirty_pages;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {};
 
 #endif
 
+#ifndef KVM_MEM_SLOTS_NUM
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#endif
+
+/*
+ * Note:
+ * memslots are not sorted by id anymore, please use id_to_memslot()
+ * to get the memslot by its id.
+ */
 struct kvm_memslots {
-       int nmemslots;
        u64 generation;
-       struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
-                                       KVM_PRIVATE_MEM_SLOTS];
+       struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
+       /* The mapping table from slot id to the index in memslots[]. */
+       int id_to_index[KVM_MEM_SLOTS_NUM];
 };
 
 struct kvm {
@@ -239,7 +253,6 @@ struct kvm {
        struct srcu_struct srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
-       struct kvm_vcpu *bsp_vcpu;
 #endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
 
+#define kvm_for_each_memslot(memslot, slots)   \
+       for (memslot = &slots->memslots[0];     \
+             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+               memslot++)
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
@@ -314,6 +332,7 @@ void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 {
@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
                        || lockdep_is_held(&kvm->slots_lock));
 }
 
+static inline struct kvm_memory_slot *
+id_to_memslot(struct kvm_memslots *slots, int id)
+{
+       int index = slots->id_to_index[id];
+       struct kvm_memory_slot *slot;
+
+       slot = &slots->memslots[index];
+
+       WARN_ON(slot->id != id);
+       return slot;
+}
+
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
index 47a070b0520e1ee766b5b4b26816d12d9f45cad8..ff476ddaf3103cc02e5688bc00528e7d042af891 100644 (file)
@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature)
 }
 #endif /* __KERNEL__ */
 #endif /* __LINUX_KVM_PARA_H */
-
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
new file mode 100644 (file)
index 0000000..dcabf4f
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * TCA6507 LED chip driver.
+ *
+ * Copyright (C) 2011 Neil Brown <neil@brown.name>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_TCA6507_H
+#define __LINUX_TCA6507_H
+#include <linux/leds.h>
+
+struct tca6507_platform_data {
+       struct led_platform_data leds;
+#ifdef CONFIG_GPIOLIB
+       int gpio_base;
+       void (*setup)(unsigned gpio_base, unsigned ngpio);
+#endif
+};
+
+#define        TCA6507_MAKE_GPIO 1
+#endif /* __LINUX_TCA6507_H*/
index 7978eec1b7d9964420c2e854afba51a8f9cbf8f5..7c727a90d70da6229afeb9c9cf7c8601841863e2 100644 (file)
@@ -164,11 +164,11 @@ static inline void mpol_get(struct mempolicy *pol)
                atomic_inc(&pol->refcnt);
 }
 
-extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
-static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
        if (a == b)
-               return 1;
+               return true;
        return __mpol_equal(a, b);
 }
 
@@ -257,9 +257,9 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 
 struct mempolicy {};
 
-static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
-       return 1;
+       return true;
 }
 
 static inline void mpol_put(struct mempolicy *p)
index 3816c2fac0ad66d48b42669063c4efcf2264e700..a98e2a316d1f2e4235cc628e52b44058112ab976 100644 (file)
@@ -69,6 +69,7 @@ struct regulator_init_data;
 struct mc13xxx_regulator_init_data {
        int id;
        struct regulator_init_data *init_data;
+       struct device_node *node;
 };
 
 struct mc13xxx_regulator_platform_data {
index 8bf2cb9502dd7ba492680d371279577392c6d3fe..d0cb12eba402638d33490f71663ef8d0bcf086a1 100644 (file)
 #define TPS65910_GPIO_STS                              BIT(1)
 #define TPS65910_GPIO_SET                              BIT(0)
 
+/* Regulator Index Definitions */
+#define TPS65910_REG_VRTC                              0
+#define TPS65910_REG_VIO                               1
+#define TPS65910_REG_VDD1                              2
+#define TPS65910_REG_VDD2                              3
+#define TPS65910_REG_VDD3                              4
+#define TPS65910_REG_VDIG1                             5
+#define TPS65910_REG_VDIG2                             6
+#define TPS65910_REG_VPLL                              7
+#define TPS65910_REG_VDAC                              8
+#define TPS65910_REG_VAUX1                             9
+#define TPS65910_REG_VAUX2                             10
+#define TPS65910_REG_VAUX33                            11
+#define TPS65910_REG_VMMC                              12
+
+#define TPS65911_REG_VDDCTRL                           4
+#define TPS65911_REG_LDO1                              5
+#define TPS65911_REG_LDO2                              6
+#define TPS65911_REG_LDO3                              7
+#define TPS65911_REG_LDO4                              8
+#define TPS65911_REG_LDO5                              9
+#define TPS65911_REG_LDO6                              10
+#define TPS65911_REG_LDO7                              11
+#define TPS65911_REG_LDO8                              12
+
+/* Max number of TPS65910/11 regulators */
+#define TPS65910_NUM_REGS                              13
+
 /**
  * struct tps65910_board
  * Board platform data may be used to initialize regulators.
@@ -751,7 +779,7 @@ struct tps65910_board {
        int irq_base;
        int vmbch_threshold;
        int vmbch2_threshold;
-       struct regulator_init_data *tps65910_pmic_init_data;
+       struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
 };
 
 /**
index 5d9b4c9813bdb7ae5700c3f078dc5629371c5f4d..6eba2cc016c95cf1c8af5ad8ea1ded7172024a7b 100644 (file)
@@ -1482,6 +1482,18 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
+static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+                               unsigned long vm_start, unsigned long vm_end)
+{
+       struct vm_area_struct *vma = find_vma(mm, vm_start);
+
+       if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
+               vma = NULL;
+
+       return vma;
+}
+
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 #else
@@ -1618,5 +1630,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+       return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+       return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
index 3ac040f1936963e729c3688af5702ec9f97bb977..ca6ca92418a6eedc96d76c120ffd4095624d0441 100644 (file)
@@ -317,6 +317,12 @@ struct zone {
         */
        unsigned long           lowmem_reserve[MAX_NR_ZONES];
 
+       /*
+        * This is a per-zone reserve of pages that should not be
+        * considered dirtyable memory.
+        */
+       unsigned long           dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
        int node;
        /*
index 05acced439a38ec1faf822200740d032778e9022..ce93a341337d48a78b5449ea249aac04c3f8284d 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef LINUX_MSI_H
 #define LINUX_MSI_H
 
+#include <linux/kobject.h>
 #include <linux/list.h>
 
 struct msi_msg {
@@ -44,6 +45,8 @@ struct msi_desc {
 
        /* Last set MSI message */
        struct msi_msg msg;
+
+       struct kobject kobj;
 };
 
 /*
index d249254929722f041cb8ae78df8517790e18aec0..d5d2ec6494bbefc3c662352d57a7fe794955ab36 100644 (file)
@@ -354,10 +354,10 @@ static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cf
                onecmd = cmd;
                break;
        case 2:
-               onecmd = cpu_to_cfi16(cmd);
+               onecmd = cpu_to_cfi16(map, cmd);
                break;
        case 4:
-               onecmd = cpu_to_cfi32(cmd);
+               onecmd = cpu_to_cfi32(map, cmd);
                break;
        }
 
@@ -437,10 +437,10 @@ static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
        case 1:
                break;
        case 2:
-               res = cfi16_to_cpu(res);
+               res = cfi16_to_cpu(map, res);
                break;
        case 4:
-               res = cfi32_to_cpu(res);
+               res = cfi32_to_cpu(map, res);
                break;
        default: BUG();
        }
@@ -480,12 +480,12 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
        if (map_bankwidth_is_1(map)) {
                return val.x[0];
        } else if (map_bankwidth_is_2(map)) {
-               return cfi16_to_cpu(val.x[0]);
+               return cfi16_to_cpu(map, val.x[0]);
        } else {
                /* No point in a 64-bit byteswap since that would just be
                   swapping the responses from different chips, and we are
                   only interested in one chip (a representative sample) */
-               return cfi32_to_cpu(val.x[0]);
+               return cfi32_to_cpu(map, val.x[0]);
        }
 }
 
@@ -496,12 +496,12 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
        if (map_bankwidth_is_1(map)) {
                return val.x[0] & 0xff;
        } else if (map_bankwidth_is_2(map)) {
-               return cfi16_to_cpu(val.x[0]);
+               return cfi16_to_cpu(map, val.x[0]);
        } else {
                /* No point in a 64-bit byteswap since that would just be
                   swapping the responses from different chips, and we are
                   only interested in one chip (a representative sample) */
-               return cfi32_to_cpu(val.x[0]);
+               return cfi32_to_cpu(map, val.x[0]);
        }
 }
 
index 51cc3f5917a80725683421d501f1f0016c284aa6..b97a625071f8c63d5c7b314d1b648b18afe87754 100644 (file)
 
 #include <asm/byteorder.h>
 
-#ifndef CONFIG_MTD_CFI_ADV_OPTIONS
-
-#define CFI_HOST_ENDIAN
-
-#else
-
-#ifdef CONFIG_MTD_CFI_NOSWAP
-#define CFI_HOST_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_LE_BYTE_SWAP
-#define CFI_LITTLE_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_BE_BYTE_SWAP
-#define CFI_BIG_ENDIAN
-#endif
-
-#endif
-
-#if defined(CFI_LITTLE_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_le16(x)
-#define cpu_to_cfi32(x) cpu_to_le32(x)
-#define cpu_to_cfi64(x) cpu_to_le64(x)
-#define cfi16_to_cpu(x) le16_to_cpu(x)
-#define cfi32_to_cpu(x) le32_to_cpu(x)
-#define cfi64_to_cpu(x) le64_to_cpu(x)
-#elif defined (CFI_BIG_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_be16(x)
-#define cpu_to_cfi32(x) cpu_to_be32(x)
-#define cpu_to_cfi64(x) cpu_to_be64(x)
-#define cfi16_to_cpu(x) be16_to_cpu(x)
-#define cfi32_to_cpu(x) be32_to_cpu(x)
-#define cfi64_to_cpu(x) be64_to_cpu(x)
-#elif defined (CFI_HOST_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) (x)
-#define cpu_to_cfi32(x) (x)
-#define cpu_to_cfi64(x) (x)
-#define cfi16_to_cpu(x) (x)
-#define cfi32_to_cpu(x) (x)
-#define cfi64_to_cpu(x) (x)
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
 #else
 #error No CFI endianness defined
 #endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
index a9e6ba46865eb9a3be3a2713f6819bf988462b22..94e924e2ecd5799f29b8ac6ef4723411be1be9f5 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/string.h>
 #include <linux/bug.h>
-
+#include <linux/kernel.h>
 
 #include <asm/unaligned.h>
 #include <asm/system.h>
@@ -214,6 +214,7 @@ struct map_info {
        void __iomem *virt;
        void *cached;
 
+       int swap; /* this mapping's byte-swapping requirement */
        int bankwidth; /* in octets. This isn't necessarily the width
                       of actual bus cycles -- it's the repeat interval
                      in bytes, before you are talking to the first chip again.
index 9f5b312af7838d5275ff9611030da3861ec2d612..1a81fde8f3331d9652054325aa36809cb40b45f1 100644 (file)
@@ -171,87 +171,60 @@ struct mtd_info {
        struct mtd_erase_region_info *eraseregions;
 
        /*
-        * Erase is an asynchronous operation.  Device drivers are supposed
-        * to call instr->callback() whenever the operation completes, even
-        * if it completes with a failure.
-        * Callers are supposed to pass a callback function and wait for it
-        * to be called before writing to the block.
+        * Do not call via these pointers, use corresponding mtd_*()
+        * wrappers instead.
         */
        int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-
-       /* This stuff for eXecute-In-Place */
-       /* phys is optional and may be set to NULL */
        int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
-                       size_t *retlen, void **virt, resource_size_t *phys);
-
-       /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+                     size_t *retlen, void **virt, resource_size_t *phys);
        void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-
-       /* Allow NOMMU mmap() to directly map the device (if not NULL)
-        * - return the address to which the offset maps
-        * - return -ENOSYS to indicate refusal to do the mapping
-        */
        unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
                                            unsigned long len,
                                            unsigned long offset,
                                            unsigned long flags);
-
-       /* Backing device capabilities for this device
-        * - provides mmap capabilities
-        */
-       struct backing_dev_info *backing_dev_info;
-
-
-       int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-       int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
-       /* In blackbox flight recorder like scenarios we want to make successful
-          writes in interrupt context. panic_write() is only intended to be
-          called when its known the kernel is about to panic and we need the
-          write to succeed. Since the kernel is not going to be running for much
-          longer, this function can break locks and delay to ensure the write
-          succeeds (but not sleep). */
-
-       int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
+       int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
+                    size_t *retlen, u_char *buf);
+       int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
+                     size_t *retlen, const u_char *buf);
+       int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+                           size_t *retlen, const u_char *buf);
        int (*read_oob) (struct mtd_info *mtd, loff_t from,
                         struct mtd_oob_ops *ops);
        int (*write_oob) (struct mtd_info *mtd, loff_t to,
-                        struct mtd_oob_ops *ops);
-
-       /*
-        * Methods to access the protection register area, present in some
-        * flash devices. The user data is one time programmable but the
-        * factory data is read only.
-        */
-       int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-       int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-       int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-       int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-       int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-       int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
-
-       /* kvec-based read/write methods.
-          NB: The 'count' parameter is the number of _vectors_, each of
-          which contains an (ofs, len) tuple.
-       */
-       int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
-
-       /* Sync */
+                         struct mtd_oob_ops *ops);
+       int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+                                  size_t len);
+       int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                  size_t len, size_t *retlen, u_char *buf);
+       int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+                                  size_t len);
+       int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                  size_t len, size_t *retlen, u_char *buf);
+       int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
+                                   size_t *retlen, u_char *buf);
+       int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                  size_t len);
+       int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+                       unsigned long count, loff_t to, size_t *retlen);
        void (*sync) (struct mtd_info *mtd);
-
-       /* Chip-supported device locking */
        int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
        int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
        int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
-       /* Power Management functions */
+       int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+       int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
        int (*suspend) (struct mtd_info *mtd);
        void (*resume) (struct mtd_info *mtd);
+       /*
+        * If the driver is something smart, like UBI, it may need to maintain
+        * its own reference counting. The below functions are only for driver.
+        */
+       int (*get_device) (struct mtd_info *mtd);
+       void (*put_device) (struct mtd_info *mtd);
 
-       /* Bad block management functions */
-       int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
-       int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+       /* Backing device capabilities for this device
+        * - provides mmap capabilities
+        */
+       struct backing_dev_info *backing_dev_info;
 
        struct notifier_block reboot_notifier;  /* default mode before reboot */
 
@@ -265,18 +238,218 @@ struct mtd_info {
        struct module *owner;
        struct device dev;
        int usecount;
-
-       /* If the driver is something smart, like UBI, it may need to maintain
-        * its own reference counting. The below functions are only for driver.
-        * The driver may register its callbacks. These callbacks are not
-        * supposed to be called by MTD users */
-       int (*get_device) (struct mtd_info *mtd);
-       void (*put_device) (struct mtd_info *mtd);
 };
 
-static inline struct mtd_info *dev_to_mtd(struct device *dev)
+/*
+ * Erase is an asynchronous operation.  Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+       return mtd->erase(mtd, instr);
+}
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
+                           size_t *retlen, void **virt, resource_size_t *phys)
+{
+       *retlen = 0;
+       if (!mtd->point)
+               return -EOPNOTSUPP;
+       return mtd->point(mtd, from, len, retlen, virt, phys);
+}
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
-       return dev ? dev_get_drvdata(dev) : NULL;
+       return mtd->unpoint(mtd, from, len);
+}
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
+                                                 unsigned long len,
+                                                 unsigned long offset,
+                                                 unsigned long flags)
+{
+       if (!mtd->get_unmapped_area)
+               return -EOPNOTSUPP;
+       return mtd->get_unmapped_area(mtd, len, offset, flags);
+}
+
+static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+                          size_t *retlen, u_char *buf)
+{
+       return mtd->read(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+                           size_t *retlen, const u_char *buf)
+{
+       *retlen = 0;
+       if (!mtd->write)
+               return -EROFS;
+       return mtd->write(mtd, to, len, retlen, buf);
+}
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+                                 size_t *retlen, const u_char *buf)
+{
+       *retlen = 0;
+       if (!mtd->panic_write)
+               return -EOPNOTSUPP;
+       return mtd->panic_write(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+                              struct mtd_oob_ops *ops)
+{
+       ops->retlen = ops->oobretlen = 0;
+       if (!mtd->read_oob)
+               return -EOPNOTSUPP;
+       return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+                               struct mtd_oob_ops *ops)
+{
+       ops->retlen = ops->oobretlen = 0;
+       if (!mtd->write_oob)
+               return -EOPNOTSUPP;
+       return mtd->write_oob(mtd, to, ops);
+}
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
+                                        struct otp_info *buf, size_t len)
+{
+       if (!mtd->get_fact_prot_info)
+               return -EOPNOTSUPP;
+       return mtd->get_fact_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+                                        size_t len, size_t *retlen,
+                                        u_char *buf)
+{
+       *retlen = 0;
+       if (!mtd->read_fact_prot_reg)
+               return -EOPNOTSUPP;
+       return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
+                                        struct otp_info *buf,
+                                        size_t len)
+{
+       if (!mtd->get_user_prot_info)
+               return -EOPNOTSUPP;
+       return mtd->get_user_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+                                        size_t len, size_t *retlen,
+                                        u_char *buf)
+{
+       *retlen = 0;
+       if (!mtd->read_user_prot_reg)
+               return -EOPNOTSUPP;
+       return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
+                                         size_t len, size_t *retlen,
+                                         u_char *buf)
+{
+       *retlen = 0;
+       if (!mtd->write_user_prot_reg)
+               return -EOPNOTSUPP;
+       return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+                                        size_t len)
+{
+       if (!mtd->lock_user_prot_reg)
+               return -EOPNOTSUPP;
+       return mtd->lock_user_prot_reg(mtd, from, len);
+}
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+              unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+       if (mtd->sync)
+               mtd->sync(mtd);
+}
+
+/* Chip-supported device locking */
+static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       if (!mtd->lock)
+               return -EOPNOTSUPP;
+       return mtd->lock(mtd, ofs, len);
+}
+
+static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       if (!mtd->unlock)
+               return -EOPNOTSUPP;
+       return mtd->unlock(mtd, ofs, len);
+}
+
+static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       if (!mtd->is_locked)
+               return -EOPNOTSUPP;
+       return mtd->is_locked(mtd, ofs, len);
+}
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+       if (!mtd->suspend)
+               return -EOPNOTSUPP;
+       return mtd->suspend(mtd);
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+       if (mtd->resume)
+               mtd->resume(mtd);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+       if (!mtd->block_isbad)
+               return -EOPNOTSUPP;
+       return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+       if (!mtd->block_markbad)
+               return -EOPNOTSUPP;
+       return mtd->block_markbad(mtd, ofs);
 }
 
 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -309,6 +482,16 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
        return do_div(sz, mtd->writesize);
 }
 
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+       return mtd->read_oob && mtd->write_oob;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+       return !!mtd->block_isbad;
+}
+
        /* Kernel-side ioctl definitions */
 
 struct mtd_partition;
@@ -338,13 +521,6 @@ struct mtd_notifier {
 
 extern void register_mtd_user (struct mtd_notifier *new);
 extern int unregister_mtd_user (struct mtd_notifier *old);
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
-                      unsigned long count, loff_t to, size_t *retlen);
-
-int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
-                     unsigned long count, loff_t from, size_t *retlen);
-
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
 
 void mtd_erase_callback(struct erase_info *instr);
index 904131bab501380e9a82dbfdafa412609531b7f1..63b5a8b6dfbda263525aada5aeee309b312b2620 100644 (file)
@@ -555,6 +555,7 @@ struct nand_chip {
 #define NAND_MFR_HYNIX         0xad
 #define NAND_MFR_MICRON                0x2c
 #define NAND_MFR_AMD           0x01
+#define NAND_MFR_MACRONIX      0xc2
 
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
index 04e018160e2b516be1e29c383e983036c31ffef6..d2887e76b7f66845b3662c9c52b80b211b78f38b 100644 (file)
@@ -30,6 +30,7 @@ struct physmap_flash_data {
        unsigned int            pfow_base;
        char                    *probe_type;
        struct mtd_partition    *parts;
+       const char              **part_probe_types;
 };
 
 #endif /* __LINUX_MTD_PHYSMAP__ */
index b5479df8378d2fe3981ffab93bb5ff39980e9474..ba4d7656ecfde15c98188afee4a086b19746772f 100644 (file)
@@ -153,6 +153,7 @@ struct nfs_server {
        struct rb_root          openowner_id;
        struct rb_root          lockowner_id;
 #endif
+       struct list_head        state_owners_lru;
        struct list_head        layouts;
        struct list_head        delegations;
        void (*destroy)(struct nfs_server *);
index ae7d6a380dae2852b7e9a48fba3fb7265f0df6f0..308c188770185962547e196312aa87f304aa4502 100644 (file)
@@ -66,6 +66,8 @@ struct idmap_msg {
 /* Forward declaration to make this header independent of others */
 struct nfs_client;
 struct nfs_server;
+struct nfs_fattr;
+struct nfs4_string;
 
 #ifdef CONFIG_NFS_USE_NEW_IDMAPPER
 
@@ -97,6 +99,12 @@ void nfs_idmap_delete(struct nfs_client *);
 
 #endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
 
+void nfs_fattr_init_names(struct nfs_fattr *fattr,
+               struct nfs4_string *owner_name,
+               struct nfs4_string *group_name);
+void nfs_fattr_free_names(struct nfs_fattr *);
+void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
+
 int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *);
 int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *);
 int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t);
index 2a7c533be5dd8aa2f584fde1faad94c558130d75..a764cef06b733f34f89ed4a618bfb41e33428258 100644 (file)
 /* Forward declaration for NFS v3 */
 struct nfs4_secinfo_flavors;
 
+struct nfs4_string {
+       unsigned int len;
+       char *data;
+};
+
 struct nfs_fsid {
        uint64_t                major;
        uint64_t                minor;
@@ -61,6 +66,8 @@ struct nfs_fattr {
        struct timespec         pre_ctime;      /* pre_op_attr.ctime      */
        unsigned long           time_start;
        unsigned long           gencount;
+       struct nfs4_string      *owner_name;
+       struct nfs4_string      *group_name;
 };
 
 #define NFS_ATTR_FATTR_TYPE            (1U << 0)
@@ -85,6 +92,8 @@ struct nfs_fattr {
 #define NFS_ATTR_FATTR_V4_REFERRAL     (1U << 19)      /* NFSv4 referral */
 #define NFS_ATTR_FATTR_MOUNTPOINT      (1U << 20)      /* Treat as mountpoint */
 #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID               (1U << 21)
+#define NFS_ATTR_FATTR_OWNER_NAME      (1U << 22)
+#define NFS_ATTR_FATTR_GROUP_NAME      (1U << 23)
 
 #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
                | NFS_ATTR_FATTR_MODE \
@@ -324,6 +333,7 @@ struct nfs_openargs {
        const struct qstr *     name;
        const struct nfs_server *server;         /* Needed for ID mapping */
        const u32 *             bitmask;
+       const u32 *             dir_bitmask;
        __u32                   claim;
        struct nfs4_sequence_args       seq_args;
 };
@@ -342,6 +352,8 @@ struct nfs_openres {
        __u32                   do_recall;
        __u64                   maxsize;
        __u32                   attrset[NFS4_BITMAP_SIZE];
+       struct nfs4_string      *owner;
+       struct nfs4_string      *group_owner;
        struct nfs4_sequence_res        seq_res;
 };
 
@@ -602,11 +614,16 @@ struct nfs_getaclargs {
        size_t                          acl_len;
        unsigned int                    acl_pgbase;
        struct page **                  acl_pages;
+       struct page *                   acl_scratch;
        struct nfs4_sequence_args       seq_args;
 };
 
+/* getxattr ACL interface flags */
+#define NFS4_ACL_LEN_REQUEST   0x0001  /* zero length getxattr buffer */
 struct nfs_getaclres {
        size_t                          acl_len;
+       size_t                          acl_data_offset;
+       int                             acl_flags;
        struct nfs4_sequence_res        seq_res;
 };
 
@@ -773,11 +790,6 @@ struct nfs3_getaclres {
        struct posix_acl *      acl_default;
 };
 
-struct nfs4_string {
-       unsigned int len;
-       char *data;
-};
-
 #ifdef CONFIG_NFS_V4
 
 typedef u64 clientid4;
index b0638fd91e92fdacc2f2675207ef723342072597..22691f614043df3d3cdf77c3b12dd97a551bf813 100644 (file)
@@ -13,6 +13,7 @@
 
 enum page_debug_flags {
        PAGE_DEBUG_FLAG_POISON,         /* Page is poisoned */
+       PAGE_DEBUG_FLAG_GUARD,
 };
 
 /*
@@ -21,7 +22,8 @@ enum page_debug_flags {
  */
 
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) \
+#if !defined(CONFIG_PAGE_POISONING) && \
+    !defined(CONFIG_PAGE_GUARD) \
 /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
 #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
 #endif
index bab82f4c571c611d91fb9f180dc2a69ed12c95e2..ed17024d2ebee598044ec693632fc4cc1cd1eb7f 100644 (file)
@@ -21,7 +21,6 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-void __pagevec_free(struct pagevec *pvec);
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
 void pagevec_strip(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
@@ -67,12 +66,6 @@ static inline void pagevec_release(struct pagevec *pvec)
                __pagevec_release(pvec);
 }
 
-static inline void pagevec_free(struct pagevec *pvec)
-{
-       if (pagevec_count(pvec))
-               __pagevec_free(pvec);
-}
-
 static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
 {
        ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
index 7cea7b6c14133628c5f543e729852bd3db085b3c..c8320144fe790cc3fb8dc5fe593092412a838bae 100644 (file)
@@ -29,7 +29,7 @@ extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
 extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
 extern void pci_disable_link_state(struct pci_dev *pdev, int state);
 extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-extern void pcie_clear_aspm(void);
+extern void pcie_clear_aspm(struct pci_bus *bus);
 extern void pcie_no_aspm(void);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -47,7 +47,7 @@ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
 static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
 }
-static inline void pcie_clear_aspm(void)
+static inline void pcie_clear_aspm(struct pci_bus *bus)
 {
 }
 static inline void pcie_no_aspm(void)
index 7cda65b5f79806005af9aada8831f8403a519db7..84225c756bd131ce422e926fa0a43a6ddc3dd459 100644 (file)
@@ -336,6 +336,7 @@ struct pci_dev {
        struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 #ifdef CONFIG_PCI_MSI
        struct list_head msi_list;
+       struct kset *msi_kset;
 #endif
        struct pci_vpd *vpd;
 #ifdef CONFIG_PCI_ATS
index b5d9657f31004d64e53d9467646c1f268f4d51ab..28fe380cb19d59f83f507cfc4e841b7dbf4c288a 100644 (file)
 #define PCI_EXT_CAP_ID_ARI     14
 #define PCI_EXT_CAP_ID_ATS     15
 #define PCI_EXT_CAP_ID_SRIOV   16
+#define PCI_EXT_CAP_ID_PRI     19
 #define PCI_EXT_CAP_ID_LTR     24
+#define PCI_EXT_CAP_ID_PASID   27
 
 /* Advanced Error Reporting */
 #define PCI_ERR_UNCOR_STATUS   4       /* Uncorrectable Error Status */
 #define  PCI_ATS_MIN_STU       12      /* shift of minimum STU block */
 
 /* Page Request Interface */
-#define PCI_PRI_CAP            0x13    /* PRI capability ID */
-#define PCI_PRI_CONTROL_OFF    0x04    /* Offset of control register */
-#define PCI_PRI_STATUS_OFF     0x06    /* Offset of status register */
-#define PCI_PRI_ENABLE         0x0001  /* Enable mask */
-#define PCI_PRI_RESET          0x0002  /* Reset bit mask */
-#define PCI_PRI_STATUS_RF      0x0001  /* Request Failure */
-#define PCI_PRI_STATUS_UPRGI   0x0002  /* Unexpected PRG index */
-#define PCI_PRI_STATUS_STOPPED 0x0100  /* PRI Stopped */
-#define PCI_PRI_MAX_REQ_OFF    0x08    /* Cap offset for max reqs supported */
-#define PCI_PRI_ALLOC_REQ_OFF  0x0c    /* Cap offset for max reqs allowed */
+#define PCI_PRI_CTRL           0x04    /* PRI control register */
+#define  PCI_PRI_CTRL_ENABLE   0x01    /* Enable */
+#define  PCI_PRI_CTRL_RESET    0x02    /* Reset */
+#define PCI_PRI_STATUS         0x06    /* PRI status register */
+#define  PCI_PRI_STATUS_RF     0x001   /* Response Failure */
+#define  PCI_PRI_STATUS_UPRGI  0x002   /* Unexpected PRG index */
+#define  PCI_PRI_STATUS_STOPPED        0x100   /* PRI Stopped */
+#define PCI_PRI_MAX_REQ                0x08    /* PRI max reqs supported */
+#define PCI_PRI_ALLOC_REQ      0x0c    /* PRI max reqs allowed */
 
 /* PASID capability */
-#define PCI_PASID_CAP          0x1b    /* PASID capability ID */
-#define PCI_PASID_CAP_OFF      0x04    /* PASID feature register */
-#define PCI_PASID_CONTROL_OFF   0x06    /* PASID control register */
-#define PCI_PASID_ENABLE       0x01    /* Enable/Supported bit */
-#define PCI_PASID_EXEC         0x02    /* Exec permissions Enable/Supported */
-#define PCI_PASID_PRIV         0x04    /* Priviledge Mode Enable/Support */
+#define PCI_PASID_CAP          0x04    /* PASID feature register */
+#define  PCI_PASID_CAP_EXEC    0x02    /* Exec permissions Supported */
+#define  PCI_PASID_CAP_PRIV    0x04    /* Priviledge Mode Supported */
+#define PCI_PASID_CTRL         0x06    /* PASID control register */
+#define  PCI_PASID_CTRL_ENABLE 0x01    /* Enable bit */
+#define  PCI_PASID_CTRL_EXEC   0x02    /* Exec permissions Enable */
+#define  PCI_PASID_CTRL_PRIV   0x04    /* Priviledge Mode Enable */
 
 /* Single Root I/O Virtualization */
 #define PCI_SRIOV_CAP          0x04    /* SR-IOV Capabilities */
index 38d10326246afbbec371b5cddc2beaff37f1dd48..e7cf6669ac3482e606c04e90470af6a37d4adcf4 100644 (file)
@@ -30,6 +30,8 @@ struct pid_namespace {
 #ifdef CONFIG_BSD_PROCESS_ACCT
        struct bsd_acct_struct *bacct;
 #endif
+       gid_t pid_gid;
+       int hide_pid;
 };
 
 extern struct pid_namespace init_pid_ns;
index 88863531d862c6844189d9555432e7bc09e7b67e..d0aecb7f6fb9f56e9fb5de32487ebe33f078ad9a 100644 (file)
@@ -48,7 +48,7 @@ struct pinmux_map {
        const char *group;
        struct device *dev;
        const char *dev_name;
-       const bool hog_on_boot;
+       bool hog_on_boot;
 };
 
 /*
@@ -66,30 +66,22 @@ struct pinmux_map {
        { .name = a, .ctrl_dev_name = b, .function = c }
 
 /*
- * Convenience macro to map a function onto the primary device pinctrl device
- * this is especially helpful on systems that have only one pin controller
- * or need to set up a lot of mappings on the primary controller.
- */
-#define PINMUX_MAP_PRIMARY(a, b, c) \
-       { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
-         .dev_name = c }
-
-/*
- * Convenience macro to map a system function onto the primary pinctrl device.
- * System functions are not assigned to a particular device.
+ * Convenience macro to map a system function onto a certain pinctrl device,
+ * to be hogged by the pinmux core until the system shuts down.
  */
-#define PINMUX_MAP_PRIMARY_SYS(a, b) \
-       { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b }
+#define PINMUX_MAP_SYS_HOG(a, b, c) \
+       { .name = a, .ctrl_dev_name = b, .function = c, \
+         .hog_on_boot = true }
 
 /*
- * Convenience macro to map a system function onto the primary pinctrl device,
- * to be hogged by the pinmux core until the system shuts down.
+ * Convenience macro to map a system function onto a certain pinctrl device
+ * using a specified group, to be hogged by the pinmux core until the system
+ * shuts down.
  */
-#define PINMUX_MAP_PRIMARY_SYS_HOG(a, b) \
-       { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+#define PINMUX_MAP_SYS_HOG_GROUP(a, b, c, d)           \
+       { .name = a, .ctrl_dev_name = b, .function = c, .group = d, \
          .hog_on_boot = true }
 
-
 #ifdef CONFIG_PINMUX
 
 extern int pinmux_register_mappings(struct pinmux_map const *map,
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
new file mode 100644 (file)
index 0000000..477922c
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Interface the pinconfig portions of the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCONF_H
+#define __LINUX_PINCTRL_PINCONF_H
+
+#ifdef CONFIG_PINCONF
+
+struct pinctrl_dev;
+struct seq_file;
+
+/**
+ * struct pinconf_ops - pin config operations, to be implemented by
+ * pin configuration capable drivers.
+ * @pin_config_get: get the config of a certain pin, if the requested config
+ *     is not available on this controller this should return -ENOTSUPP
+ *     and if it is available but disabled it should return -EINVAL
+ * @pin_config_get: get the config of a certain pin
+ * @pin_config_set: configure an individual pin
+ * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_set: configure all pins in a group
+ * @pin_config_dbg_show: optional debugfs display hook that will provide
+ *     per-device info for a certain pin in debugfs
+ * @pin_config_group_dbg_show: optional debugfs display hook that will provide
+ *     per-device info for a certain group in debugfs
+ */
+struct pinconf_ops {
+       int (*pin_config_get) (struct pinctrl_dev *pctldev,
+                              unsigned pin,
+                              unsigned long *config);
+       int (*pin_config_set) (struct pinctrl_dev *pctldev,
+                              unsigned pin,
+                              unsigned long config);
+       int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
+                                    unsigned selector,
+                                    unsigned long *config);
+       int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
+                                    unsigned selector,
+                                    unsigned long config);
+       void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
+                                    struct seq_file *s,
+                                    unsigned offset);
+       void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
+                                          struct seq_file *s,
+                                          unsigned selector);
+};
+
+extern int pin_config_get(const char *dev_name, const char *name,
+                         unsigned long *config);
+extern int pin_config_set(const char *dev_name, const char *name,
+                         unsigned long config);
+extern int pin_config_group_get(const char *dev_name,
+                               const char *pin_group,
+                               unsigned long *config);
+extern int pin_config_group_set(const char *dev_name,
+                               const char *pin_group,
+                               unsigned long config);
+
+#else
+
+static inline int pin_config_get(const char *dev_name, const char *name,
+                                unsigned long *config)
+{
+       return 0;
+}
+
+static inline int pin_config_set(const char *dev_name, const char *name,
+                                unsigned long config)
+{
+       return 0;
+}
+
+static inline int pin_config_group_get(const char *dev_name,
+                                      const char *pin_group,
+                                      unsigned long *config)
+{
+       return 0;
+}
+
+static inline int pin_config_group_set(const char *dev_name,
+                                      const char *pin_group,
+                                      unsigned long config)
+{
+       return 0;
+}
+
+#endif
+
+#endif /* __LINUX_PINCTRL_PINCONF_H */
index 04c011038f32080ff6489c5a8cd11aad4878f571..8bd22ee7aa098c6f4cda77276baaf47e89c8823e 100644 (file)
@@ -21,6 +21,7 @@
 
 struct pinctrl_dev;
 struct pinmux_ops;
+struct pinconf_ops;
 struct gpio_chip;
 
 /**
@@ -45,6 +46,7 @@ struct pinctrl_pin_desc {
  * @name: a name for the chip in this range
  * @id: an ID number for the chip in this range
  * @base: base offset of the GPIO range
+ * @pin_base: base pin number of the GPIO range
  * @npins: number of pins in the GPIO range, including the base number
  * @gc: an optional pointer to a gpio_chip
  */
@@ -53,6 +55,7 @@ struct pinctrl_gpio_range {
        const char *name;
        unsigned int id;
        unsigned int base;
+       unsigned int pin_base;
        unsigned int npins;
        struct gpio_chip *gc;
 };
@@ -89,22 +92,20 @@ struct pinctrl_ops {
  *     this pin controller
  * @npins: number of descriptors in the array, usually just ARRAY_SIZE()
  *     of the pins field above
- * @maxpin: since pin spaces may be sparse, there can he "holes" in the
- *     pin range, this attribute gives the maximum pin number in the
- *     total range. This should not be lower than npins for example,
- *     but may be equal to npins if you have no holes in the pin range.
  * @pctlops: pin control operation vtable, to support global concepts like
  *     grouping of pins, this is optional.
- * @pmxops: pinmux operation vtable, if you support pinmuxing in your driver
+ * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver
+ * @confops: pin config operations vtable, if you support pin configuration in
+ *     your driver
  * @owner: module providing the pin controller, used for refcounting
  */
 struct pinctrl_desc {
        const char *name;
        struct pinctrl_pin_desc const *pins;
        unsigned int npins;
-       unsigned int maxpin;
        struct pinctrl_ops *pctlops;
        struct pinmux_ops *pmxops;
+       struct pinconf_ops *confops;
        struct module *owner;
 };
 
@@ -123,7 +124,7 @@ extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
 
 struct pinctrl_dev;
 
-/* Sufficiently stupid default function when pinctrl is not in use */
+/* Sufficiently stupid default functions when pinctrl is not in use */
 static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
 {
        return pin >= 0;
index 3c430e797efc7397d7363e5c421bd8746ef520b4..937b3e2fa36f7a5d1fc2722cd31bbbeb5ee24309 100644 (file)
@@ -52,9 +52,15 @@ struct pinctrl_dev;
  * @disable: disable a certain muxing selector with a certain pin group
  * @gpio_request_enable: requests and enables GPIO on a certain pin.
  *     Implement this only if you can mux every pin individually as GPIO. The
- *     affected GPIO range is passed along with an offset into that
+ *     affected GPIO range is passed along with an offset(pin number) into that
  *     specific GPIO range - function selectors and pin groups are orthogonal
- *     to this, the core will however make sure the pins do not collide
+ *     to this, the core will however make sure the pins do not collide.
+ * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of
+ *     @gpio_request_enable
+ * @gpio_set_direction: Since controllers may need different configurations
+ *     depending on whether the GPIO is configured as input or output,
+ *     a direction selector function may be implemented as a backing
+ *     to the GPIO controllers that need pin muxing.
  */
 struct pinmux_ops {
        int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -73,11 +79,20 @@ struct pinmux_ops {
        int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
                                    struct pinctrl_gpio_range *range,
                                    unsigned offset);
+       void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
+                                  struct pinctrl_gpio_range *range,
+                                  unsigned offset);
+       int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
+                                  struct pinctrl_gpio_range *range,
+                                  unsigned offset,
+                                  bool input);
 };
 
 /* External interface to pinmux */
 extern int pinmux_request_gpio(unsigned gpio);
 extern void pinmux_free_gpio(unsigned gpio);
+extern int pinmux_gpio_direction_input(unsigned gpio);
+extern int pinmux_gpio_direction_output(unsigned gpio);
 extern struct pinmux * __must_check pinmux_get(struct device *dev, const char *name);
 extern void pinmux_put(struct pinmux *pmx);
 extern int pinmux_enable(struct pinmux *pmx);
@@ -94,6 +109,16 @@ static inline void pinmux_free_gpio(unsigned gpio)
 {
 }
 
+static inline int pinmux_gpio_direction_input(unsigned gpio)
+{
+       return 0;
+}
+
+static inline int pinmux_gpio_direction_output(unsigned gpio)
+{
+       return 0;
+}
+
 static inline struct pinmux * __must_check pinmux_get(struct device *dev, const char *name)
 {
        return NULL;
index 6d9e575519cc1c4293372fbeb6f6761551298f38..85c5073062390b3b1129f1d5af9ec0c8ed1c4f5e 100644 (file)
@@ -253,7 +253,7 @@ extern const struct proc_ns_operations utsns_operations;
 extern const struct proc_ns_operations ipcns_operations;
 
 union proc_op {
-       int (*proc_get_link)(struct inode *, struct path *);
+       int (*proc_get_link)(struct dentry *, struct path *);
        int (*proc_read)(struct task_struct *task, char *page);
        int (*proc_show)(struct seq_file *m,
                struct pid_namespace *ns, struct pid *pid,
index f7756d146c6164560fd919f16699b0d601a09f26..f2698a0edfc44486a95bd8379b1fa40dd56e1e70 100644 (file)
@@ -149,6 +149,8 @@ int regulator_bulk_enable(int num_consumers,
                          struct regulator_bulk_data *consumers);
 int regulator_bulk_disable(int num_consumers,
                           struct regulator_bulk_data *consumers);
+int regulator_bulk_force_disable(int num_consumers,
+                          struct regulator_bulk_data *consumers);
 void regulator_bulk_free(int num_consumers,
                         struct regulator_bulk_data *consumers);
 
@@ -212,6 +214,11 @@ static inline int regulator_disable(struct regulator *regulator)
        return 0;
 }
 
+static inline int regulator_force_disable(struct regulator *regulator)
+{
+       return 0;
+}
+
 static inline int regulator_disable_deferred(struct regulator *regulator,
                                             int ms)
 {
@@ -242,6 +249,12 @@ static inline int regulator_bulk_disable(int num_consumers,
        return 0;
 }
 
+static inline int regulator_bulk_force_disable(int num_consumers,
+                                       struct regulator_bulk_data *consumers)
+{
+       return 0;
+}
+
 static inline void regulator_bulk_free(int num_consumers,
                                       struct regulator_bulk_data *consumers)
 {
index 52c89ae32f64c8829dd68162d65b3dc2ae135afd..4214b9a9d1c968d05714a37c7d15cbdab905bf44 100644 (file)
@@ -154,6 +154,7 @@ enum regulator_type {
  * this type.
  *
  * @name: Identifying name for the regulator.
+ * @supply_name: Identifying the regulator supply
  * @id: Numerical identifier for the regulator.
  * @n_voltages: Number of selectors available for ops.list_voltage().
  * @ops: Regulator operations table.
@@ -163,6 +164,7 @@ enum regulator_type {
  */
 struct regulator_desc {
        const char *name;
+       const char *supply_name;
        int id;
        unsigned n_voltages;
        struct regulator_ops *ops;
@@ -212,7 +214,7 @@ struct regulator_dev {
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        struct device *dev, const struct regulator_init_data *init_data,
-       void *driver_data);
+       void *driver_data, struct device_node *of_node);
 void regulator_unregister(struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
new file mode 100644 (file)
index 0000000..769704f
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * OpenFirmware regulator support routines
+ *
+ */
+
+#ifndef __LINUX_OF_REG_H
+#define __LINUX_OF_REG_H
+
+#if defined(CONFIG_OF)
+extern struct regulator_init_data
+       *of_get_regulator_init_data(struct device *dev,
+                                   struct device_node *node);
+#else
+static inline struct regulator_init_data
+       *of_get_regulator_init_data(struct device *dev,
+                                   struct device_node *node)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_REG_H */
index 2148b122779b5a2fd8421c5e15cfc9ca91ea85ff..1afb9954bbf125fc3da62e2d9ef631b6508b5b89 100644 (file)
@@ -120,6 +120,7 @@ void anon_vma_init(void);   /* create anon_vma_cachep */
 int  anon_vma_prepare(struct vm_area_struct *);
 void unlink_anon_vmas(struct vm_area_struct *);
 int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+void anon_vma_moveto_tail(struct vm_area_struct *);
 int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 void __anon_vma_link(struct vm_area_struct *);
 
index f044f66018f2fa319347c7eb1365631ece58730b..21cd0303af5107ef944b27c334750d6cedfb13ef 100644 (file)
@@ -1544,6 +1544,7 @@ struct task_struct {
         */
        int nr_dirtied;
        int nr_dirtied_pause;
+       unsigned long dirty_paused_when; /* start of a write-and-pause period */
 
 #ifdef CONFIG_LATENCYTOP
        int latency_record_count;
index a822300a253b0d2be69b477be3b078448ec25017..7987ce74874b366b413ef6191b67caf02b42aba4 100644 (file)
@@ -254,6 +254,7 @@ extern void set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
 
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
+extern void block_sigmask(struct k_sigaction *ka, int signr);
 extern void exit_signals(struct task_struct *tsk);
 
 extern struct kmem_cache *sighand_cachep;
index febc4dbec2cad59adc05d3f97e39c8098b78aea4..7874a8a566386a02165ebc0bff474d8bf160f0c0 100644 (file)
@@ -26,6 +26,7 @@ struct auth_cred {
        uid_t   uid;
        gid_t   gid;
        struct group_info *group_info;
+       const char *principal;
        unsigned char machine_cred : 1;
 };
 
@@ -127,7 +128,7 @@ void                        rpc_destroy_generic_auth(void);
 void                   rpc_destroy_authunix(void);
 
 struct rpc_cred *      rpc_lookup_cred(void);
-struct rpc_cred *      rpc_lookup_machine_cred(void);
+struct rpc_cred *      rpc_lookup_machine_cred(const char *service_name);
 int                    rpcauth_register(const struct rpc_authops *);
 int                    rpcauth_unregister(const struct rpc_authops *);
 struct rpc_auth *      rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
index 8eee9dbbfe7aaddbdb5aaebfbaf5ad82669f846d..f1cfd4c85cd047c4b2fadd367eeb819aabc57d29 100644 (file)
@@ -82,8 +82,8 @@ struct gss_cred {
        enum rpc_gss_svc        gc_service;
        struct gss_cl_ctx __rcu *gc_ctx;
        struct gss_upcall_msg   *gc_upcall;
+       const char              *gc_principal;
        unsigned long           gc_upcall_timestamp;
-       unsigned char           gc_machine_cred : 1;
 };
 
 #endif /* __KERNEL__ */
index a20970ef9e4ebbf5b95aa224d11d30cef7ca7411..af70af3335461ee587d23a8567a9abdb9cc23eb9 100644 (file)
@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                             struct xdr_array2_desc *desc);
 extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                             struct xdr_array2_desc *desc);
+extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
+                            size_t len);
 
 /*
  * Provide some simple tools for XDR buffer overflow-checking etc.
index 1e22e126d2acc7a880f449c5241c3a56e901e519..06061a7f8e69131e2401985e243298cbf9795a50 100644 (file)
@@ -207,6 +207,7 @@ struct swap_list_t {
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
 
index 445702c60d0468c38a14aa0624e3bb1c1df29a5c..e872526fdc5fbb371ca0e420a961aec4738fdfa2 100644 (file)
@@ -24,7 +24,7 @@ extern int swiotlb_force;
 
 extern void swiotlb_init(int verbose);
 extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swioltb_nr_tbl(void);
+extern unsigned long swiotlb_nr_tbl(void);
 
 /*
  * Enumeration for sync targets
index 5c75153f94414d37e64607d350f395549146881d..d21b33c4c6ca8a5ec5a858de75a0e2537dcf6bf3 100644 (file)
@@ -96,13 +96,11 @@ struct ucb1400_gpio {
 
 struct ucb1400_ts {
        struct input_dev        *ts_idev;
-       struct task_struct      *ts_task;
        int                     id;
-       wait_queue_head_t       ts_wait;
-       unsigned int            ts_restart:1;
        int                     irq;
-       unsigned int            irq_pending;    /* not bit field shared */
        struct snd_ac97         *ac97;
+       wait_queue_head_t       ts_wait;
+       bool                    stopped;
 };
 
 struct ucb1400 {
index 111843f88b2a97dc87434631335ab95b6e786b45..43ba5b3ce2a3ac6be68b8aacd2b3740953113b25 100644 (file)
@@ -53,11 +53,7 @@ struct watchdog_info {
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-#define WATCHDOG_NOWAYOUT      1
-#else
-#define WATCHDOG_NOWAYOUT      0
-#endif
+#include <linux/bitops.h>
 
 struct watchdog_ops;
 struct watchdog_device;
@@ -122,6 +118,21 @@ struct watchdog_device {
 #define WDOG_NO_WAY_OUT                3       /* Is 'nowayout' feature set ? */
 };
 
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+#define WATCHDOG_NOWAYOUT              1
+#define WATCHDOG_NOWAYOUT_INIT_STATUS  (1 << WDOG_NO_WAY_OUT)
+#else
+#define WATCHDOG_NOWAYOUT              0
+#define WATCHDOG_NOWAYOUT_INIT_STATUS  0
+#endif
+
+/* Use the following function to set the nowayout feature */
+static inline void watchdog_set_nowayout(struct watchdog_device *wdd, int nowayout)
+{
+       if (nowayout)
+               set_bit(WDOG_NO_WAY_OUT, &wdd->status);
+}
+
 /* Use the following functions to manipulate watchdog driver specific data */
 static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
 {
index 0d556deb497b9988f14ac9322beb8cd815a85211..eb8b9f15f2e03b24ec6ca89b188ee9b955478268 100644 (file)
@@ -297,32 +297,50 @@ extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
 
 extern struct workqueue_struct *
-__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
-                     struct lock_class_key *key, const char *lock_name);
+__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+       struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
 
+/**
+ * alloc_workqueue - allocate a workqueue
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @args: args for @fmt
+ *
+ * Allocate a workqueue with the specified parameters.  For detailed
+ * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ *
+ * The __lock_name macro dance is to guarantee that single lock_class_key
+ * doesn't end up with different namesm, which isn't allowed by lockdep.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
 #ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(name, flags, max_active)               \
+#define alloc_workqueue(fmt, flags, max_active, args...)       \
 ({                                                             \
        static struct lock_class_key __key;                     \
        const char *__lock_name;                                \
                                                                \
-       if (__builtin_constant_p(name))                         \
-               __lock_name = (name);                           \
+       if (__builtin_constant_p(fmt))                          \
+               __lock_name = (fmt);                            \
        else                                                    \
-               __lock_name = #name;                            \
+               __lock_name = #fmt;                             \
                                                                \
-       __alloc_workqueue_key((name), (flags), (max_active),    \
-                             &__key, __lock_name);             \
+       __alloc_workqueue_key((fmt), (flags), (max_active),     \
+                             &__key, __lock_name, ##args);     \
 })
 #else
-#define alloc_workqueue(name, flags, max_active)               \
-       __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
+#define alloc_workqueue(fmt, flags, max_active, args...)       \
+       __alloc_workqueue_key((fmt), (flags), (max_active),     \
+                             NULL, NULL, ##args)
 #endif
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
- * @name: name of the workqueue
+ * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -331,11 +349,8 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-static inline struct workqueue_struct *
-alloc_ordered_workqueue(const char *name, unsigned int flags)
-{
-       return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
-}
+#define alloc_ordered_workqueue(fmt, flags, args...)           \
+       alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
 #define create_workqueue(name)                                 \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
index a378c295851f8cf9fe4d804080b30f72cf5101cd..995b8bf630aca90e6c29f0f0d919e95c51586c10 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
+DECLARE_PER_CPU(int, dirty_throttle_leaks);
+
 /*
  * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
  *
 #define DIRTY_SCOPE            8
 #define DIRTY_FULL_SCOPE       (DIRTY_SCOPE / 2)
 
-/*
- * 4MB minimal write chunk size
- */
-#define MIN_WRITEBACK_PAGES    (4096UL >> (PAGE_CACHE_SHIFT - 10))
-
 struct backing_dev_info;
 
 /*
@@ -124,6 +121,7 @@ void laptop_mode_timer_fn(unsigned long data);
 static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
+bool zone_dirty_ok(struct zone *zone);
 
 extern unsigned long global_dirty_limit;
 
@@ -138,8 +136,6 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
@@ -195,6 +191,8 @@ void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end);
 
+void account_page_redirty(struct page *page);
+
 /* pdflush.c */
 extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
                                   read-only. */
index 1a7e1d20adf9ee657044935a5bfc43874d585bb7..36eace03b2ac79229984b09254db9aed3e3b8d5f 100644 (file)
@@ -198,7 +198,8 @@ struct otp_info {
 #define MEMISLOCKED            _IOR('M', 23, struct erase_info_user)
 /*
  * Most generic write interface; can write in-band and/or out-of-band in various
- * modes (see "struct mtd_write_req")
+ * modes (see "struct mtd_write_req"). This ioctl is not supported for flashes
+ * without OOB, e.g., NOR flash.
  */
 #define MEMWRITE               _IOWR('M', 24, struct mtd_write_req)
 
index 2d70b95b3b555276a0c0f576d2334c6837b2d2fa..7184853ca36015c52d8c42573346e4edce41f1c0 100644 (file)
@@ -63,30 +63,16 @@ enum p9_debug_flags {
 
 #ifdef CONFIG_NET_9P_DEBUG
 extern unsigned int p9_debug_level;
-
-#define P9_DPRINTK(level, format, arg...) \
-do {  \
-       if ((p9_debug_level & level) == level) {\
-               if (level == P9_DEBUG_9P) \
-                       printk(KERN_NOTICE "(%8.8d) " \
-                       format , task_pid_nr(current) , ## arg); \
-               else \
-                       printk(KERN_NOTICE "-- %s (%d): " \
-                       format , __func__, task_pid_nr(current) , ## arg); \
-       } \
-} while (0)
-
+__printf(3, 4)
+void _p9_debug(enum p9_debug_flags level, const char *func,
+              const char *fmt, ...);
+#define p9_debug(level, fmt, ...)                      \
+       _p9_debug(level, __func__, fmt, ##__VA_ARGS__)
 #else
-#define P9_DPRINTK(level, format, arg...)  do { } while (0)
+#define p9_debug(level, fmt, ...)                      \
+       no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-
-#define P9_EPRINTK(level, format, arg...) \
-do { \
-       printk(level "9p: %s (%d): " \
-               format , __func__, task_pid_nr(current), ## arg); \
-} while (0)
-
 /**
  * enum p9_msg_t - 9P message types
  * @P9_TLERROR: not used
index 5591ed54dc93ad67650c1dad43d0aae9b9b60b25..77273f2fdd8020939e35a4e4f3d567d013eadc63 100644 (file)
@@ -185,7 +185,6 @@ typedef void (*activate_complete)(void *, int);
 struct scsi_device_handler {
        /* Used by the infrastructure */
        struct list_head list; /* list of scsi_device_handlers */
-       int idx;
 
        /* Filled by the hardware handler */
        struct module *module;
index 50266c9405fced26eb7a48f386d1889c1f3a6253..5f7d5b3b1c6eaa82c73b3ffcf85321d26f1c705b 100644 (file)
@@ -669,6 +669,9 @@ struct Scsi_Host {
        /* Asynchronous scan in progress */
        unsigned async_scan:1;
 
+       /* Don't resume host in EH */
+       unsigned eh_noresume:1;
+
        /*
         * Optional work queue to be utilized by the transport
         */
index 87f34c3d447d0aa705e344b9132166205c4cbbec..2c3a46d102fd52db05ce500b037d7b72b9d184c1 100644 (file)
@@ -211,6 +211,11 @@ struct iscsi_cls_session {
        unsigned int target_id;
        bool ida_used;
 
+       /*
+        * pid of userspace process that created session or -1 if
+        * created by the kernel.
+        */
+       pid_t creator;
        int state;
        int sid;                                /* session id */
        void *dd_data;                          /* LLD private data */
index 748ff7cbe5557989e6fd40fcacb70e5b8760956f..319538bf17d219d1bb3c11525c94cecdf669b978 100644 (file)
@@ -573,9 +573,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 );
 
 TRACE_EVENT(ext4_mb_release_group_pa,
-       TP_PROTO(struct ext4_prealloc_space *pa),
+       TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
 
-       TP_ARGS(pa),
+       TP_ARGS(sb, pa),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -585,7 +585,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = pa->pa_inode->i_sb->s_dev;
+               __entry->dev            = sb->s_dev;
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
        ),
index a9c87ad8331c61de25b9d2a51489de398a03d306..5f889f16b0c891dbedafd65686c48688aa7110da 100644 (file)
@@ -147,7 +147,7 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
        TP_ARGS(call_site, ptr)
 );
 
-TRACE_EVENT(mm_page_free_direct,
+TRACE_EVENT(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
@@ -169,7 +169,7 @@ TRACE_EVENT(mm_page_free_direct,
                        __entry->order)
 );
 
-TRACE_EVENT(mm_pagevec_free,
+TRACE_EVENT(mm_page_free_batched,
 
        TP_PROTO(struct page *page, int cold),
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
new file mode 100644 (file)
index 0000000..dd4ba3b
--- /dev/null
@@ -0,0 +1,33 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM oom
+
+#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OOM_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(oom_score_adj_update,
+
+       TP_PROTO(struct task_struct *task),
+
+       TP_ARGS(task),
+
+       TP_STRUCT__entry(
+               __field(        pid_t,  pid)
+               __array(        char,   comm,   TASK_COMM_LEN )
+               __field(         int,   oom_score_adj)
+       ),
+
+       TP_fast_assign(
+               __entry->pid = task->pid;
+               memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+               __entry->oom_score_adj = task->signal->oom_score_adj;
+       ),
+
+       TP_printk("pid=%d comm=%s oom_score_adj=%d",
+               __entry->pid, __entry->comm, __entry->oom_score_adj)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
new file mode 100644 (file)
index 0000000..b53add0
--- /dev/null
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM task
+
+#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TASK_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(task_newtask,
+
+       TP_PROTO(struct task_struct *task, unsigned long clone_flags),
+
+       TP_ARGS(task, clone_flags),
+
+       TP_STRUCT__entry(
+               __field(        pid_t,  pid)
+               __array(        char,   comm, TASK_COMM_LEN)
+               __field( unsigned long, clone_flags)
+               __field(        int,    oom_score_adj)
+       ),
+
+       TP_fast_assign(
+               __entry->pid = task->pid;
+               memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+               __entry->clone_flags = clone_flags;
+               __entry->oom_score_adj = task->signal->oom_score_adj;
+       ),
+
+       TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%d",
+               __entry->pid, __entry->comm,
+               __entry->clone_flags, __entry->oom_score_adj)
+);
+
+TRACE_EVENT(task_rename,
+
+       TP_PROTO(struct task_struct *task, char *comm),
+
+       TP_ARGS(task, comm),
+
+       TP_STRUCT__entry(
+               __field(        pid_t,  pid)
+               __array(        char, oldcomm,  TASK_COMM_LEN)
+               __array(        char, newcomm,  TASK_COMM_LEN)
+               __field(        int, oom_score_adj)
+       ),
+
+       TP_fast_assign(
+               __entry->pid = task->pid;
+               memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
+               memcpy(entry->newcomm, comm, TASK_COMM_LEN);
+               __entry->oom_score_adj = task->signal->oom_score_adj;
+       ),
+
+       TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%d",
+               __entry->pid, __entry->oldcomm,
+               __entry->newcomm, __entry->oom_score_adj)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 99d1d0decf88e41a7c0c038d463330e8351046b1..8588a891802339a2939dfc91e5630e3a826781d4 100644 (file)
@@ -300,12 +300,13 @@ TRACE_EVENT(balance_dirty_pages,
                 unsigned long dirty_ratelimit,
                 unsigned long task_ratelimit,
                 unsigned long dirtied,
+                unsigned long period,
                 long pause,
                 unsigned long start_time),
 
        TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
                dirty_ratelimit, task_ratelimit,
-               dirtied, pause, start_time),
+               dirtied, period, pause, start_time),
 
        TP_STRUCT__entry(
                __array(         char,  bdi, 32)
@@ -320,6 +321,8 @@ TRACE_EVENT(balance_dirty_pages,
                __field(unsigned int,   dirtied_pause)
                __field(unsigned long,  paused)
                __field(         long,  pause)
+               __field(unsigned long,  period)
+               __field(         long,  think)
        ),
 
        TP_fast_assign(
@@ -336,6 +339,9 @@ TRACE_EVENT(balance_dirty_pages,
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->dirtied        = dirtied;
                __entry->dirtied_pause  = current->nr_dirtied_pause;
+               __entry->think          = current->dirty_paused_when == 0 ? 0 :
+                        (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
+               __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
        ),
@@ -346,7 +352,7 @@ TRACE_EVENT(balance_dirty_pages,
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
-                 "paused=%lu pause=%ld",
+                 "paused=%lu pause=%ld period=%lu think=%ld",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
@@ -358,7 +364,9 @@ TRACE_EVENT(balance_dirty_pages,
                  __entry->dirtied,
                  __entry->dirtied_pause,
                  __entry->paused,      /* ms */
-                 __entry->pause        /* ms */
+                 __entry->pause,       /* ms */
+                 __entry->period,      /* ms */
+                 __entry->think        /* ms */
          )
 );
 
index d287997d3eab3ab9eeea0b802bf41eaa9238f97c..0f773708e02c034f624ca61a3e186bb8e9d68080 100644 (file)
@@ -37,6 +37,13 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+/*
+ * Allow extra references to event channels exposed to userspace by evtchn
+ */
+int evtchn_make_refcounted(unsigned int evtchn);
+int evtchn_get(unsigned int evtchn);
+void evtchn_put(unsigned int evtchn);
+
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
 int resend_irq_on_evtchn(unsigned int irq);
 void rebind_evtchn_irq(int evtchn, int irq);
index 11e2dfce42f82ecca25fec9e8e6508d47d3615c4..15f8a00ff003953639a4caba0e97109b8db06b36 100644 (file)
@@ -62,6 +62,24 @@ int gnttab_resume(void);
 
 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly);
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+                                       int flags, unsigned page_off,
+                                       unsigned length);
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+                                     domid_t trans_domid,
+                                     grant_ref_t trans_gref);
+
+/*
+ * Are sub-page grants available on this version of Xen?  Returns true if they
+ * are, and false if they're not.
+ */
+bool gnttab_subpage_grants_available(void);
+
+/*
+ * Are transitive grants available on this version of Xen?  Returns true if they
+ * are, and false if they're not.
+ */
+bool gnttab_trans_grants_available(void);
 
 /*
  * End access through the given grant reference, iff the grant entry is no
@@ -108,6 +126,13 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
 
 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly);
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+                                           unsigned long frame, int flags,
+                                           unsigned page_off,
+                                           unsigned length);
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+                                         int flags, domid_t trans_domid,
+                                         grant_ref_t trans_gref);
 
 void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
                                       unsigned long pfn);
@@ -145,9 +170,11 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
 
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
-                          struct grant_entry **__shared);
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
-                             unsigned long nr_gframes);
+                          void **__shared);
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          grant_status_t **__shared);
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
 
 extern unsigned long xen_hvm_resume_frames;
 unsigned int gnttab_max_grant_frames(void);
@@ -155,9 +182,9 @@ unsigned int gnttab_max_grant_frames(void);
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-                       struct gnttab_map_grant_ref *kmap_ops,
+                   struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                     struct page **pages, unsigned int count);
+                     struct page **pages, unsigned int count, bool clear_pte);
 
 #endif /* __ASM_GNTTAB_H__ */
index 39e571796e324fac9782cf75fe27563bf6be0f78..a17d84433e6a1ab54e660fc4f25493a180ce15f2 100644 (file)
  *  Use SMP-safe bit-setting instruction.
  */
 
+/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
 /*
  * A grant table comprises a packed array of grant entries in one or more
  * page frames shared between Xen and a guest.
  * [XEN]: This field is written by Xen and read by the sharing guest.
  * [GST]: This field is written by the guest and read by Xen.
  */
-struct grant_entry {
+
+/*
+ * Version 1 of the grant table entry structure is maintained purely
+ * for backwards compatibility.  New guests should use version 2.
+ */
+struct grant_entry_v1 {
     /* GTF_xxx: various type and flag information.  [XEN,GST] */
     uint16_t flags;
     /* The domain being granted foreign privileges. [GST] */
@@ -108,10 +118,13 @@ struct grant_entry {
  *  GTF_permit_access: Allow @domid to map/access @frame.
  *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
  *                       to this guest. Xen writes the page number to @frame.
+ *  GTF_transitive: Allow @domid to transitively access a subrange of
+ *                  @trans_grant in @trans_domid.  No mappings are allowed.
  */
 #define GTF_invalid         (0U<<0)
 #define GTF_permit_access   (1U<<0)
 #define GTF_accept_transfer (2U<<0)
+#define GTF_transitive      (3U<<0)
 #define GTF_type_mask       (3U<<0)
 
 /*
@@ -119,6 +132,9 @@ struct grant_entry {
  *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
  *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
  *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ *  GTF_sub_page: Grant access to only a subrange of the page.  @domid
+ *                will only be allowed to copy from the grant, and not
+ *                map it. [GST]
  */
 #define _GTF_readonly       (2)
 #define GTF_readonly        (1U<<_GTF_readonly)
@@ -126,6 +142,8 @@ struct grant_entry {
 #define GTF_reading         (1U<<_GTF_reading)
 #define _GTF_writing        (4)
 #define GTF_writing         (1U<<_GTF_writing)
+#define _GTF_sub_page       (8)
+#define GTF_sub_page        (1U<<_GTF_sub_page)
 
 /*
  * Subflags for GTF_accept_transfer:
@@ -142,15 +160,81 @@ struct grant_entry {
 #define _GTF_transfer_completed (3)
 #define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
 
+/*
+ * Version 2 grant table entries.  These fulfil the same role as
+ * version 1 entries, but can represent more complicated operations.
+ * Any given domain will have either a version 1 or a version 2 table,
+ * and every entry in the table will be the same version.
+ *
+ * The interface by which domains use grant references does not depend
+ * on the grant table version in use by the other domain.
+ */
 
-/***********************************
- * GRANT TABLE QUERIES AND USES
+/*
+ * Version 1 and version 2 grant entries share a common prefix.  The
+ * fields of the prefix are documented as part of struct
+ * grant_entry_v1.
  */
+struct grant_entry_header {
+    uint16_t flags;
+    domid_t  domid;
+};
 
 /*
- * Reference to a grant entry in a specified domain's grant table.
+ * Version 2 of the grant entry structure, here is a union because three
+ * different types are supported: full_page, sub_page and transitive.
+ */
+union grant_entry_v2 {
+    struct grant_entry_header hdr;
+
+    /*
+     * This member is used for V1-style full page grants, where either:
+     *
+     * -- hdr.type is GTF_accept_transfer, or
+     * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
+     *
+     * In that case, the frame field has the same semantics as the
+     * field of the same name in the V1 entry structure.
+     */
+    struct {
+       struct grant_entry_header hdr;
+       uint32_t pad0;
+       uint64_t frame;
+    } full_page;
+
+    /*
+     * If the grant type is GTF_grant_access and GTF_sub_page is set,
+     * @domid is allowed to access bytes [@page_off,@page_off+@length)
+     * in frame @frame.
+     */
+    struct {
+       struct grant_entry_header hdr;
+       uint16_t page_off;
+       uint16_t length;
+       uint64_t frame;
+    } sub_page;
+
+    /*
+     * If the grant is GTF_transitive, @domid is allowed to use the
+     * grant @gref in domain @trans_domid, as if it was the local
+     * domain.  Obviously, the transitive access must be compatible
+     * with the original grant.
+     */
+    struct {
+       struct grant_entry_header hdr;
+       domid_t trans_domid;
+       uint16_t pad0;
+       grant_ref_t gref;
+    } transitive;
+
+    uint32_t __spacer[4]; /* Pad to a power of two */
+};
+
+typedef uint16_t grant_status_t;
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
  */
-typedef uint32_t grant_ref_t;
 
 /*
  * Handle to track a mapping created via a grant reference.
@@ -321,6 +405,79 @@ struct gnttab_query_size {
 };
 DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
 
+/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>.  <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ *  1. The call may fail in an undefined manner if either mapping is not
+ *     tracked by <handle>.
+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
+ *     mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_and_replace    7
+struct gnttab_unmap_and_replace {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint64_t new_addr;
+    grant_handle_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
+
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure.  This operation can only be performed
+ * once in any given domain.  It must be performed before any grants
+ * are activated; otherwise, the domain will be stuck with version 1.
+ * The only defined versions are 1 and 2.
+ */
+#define GNTTABOP_set_version          8
+struct gnttab_set_version {
+    /* IN parameters */
+    uint32_t version;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
+
+/*
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpexch operations.
+ * <nr_frames> specify the size of vector <frame_list>.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_get_status_frames     9
+struct gnttab_get_status_frames {
+    /* IN parameters. */
+    uint32_t nr_frames;
+    domid_t  dom;
+    /* OUT parameters. */
+    int16_t  status;              /* GNTST_* */
+    GUEST_HANDLE(uint64_t) frame_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+#define GNTTABOP_get_version          10
+struct gnttab_get_version {
+    /* IN parameters */
+    domid_t dom;
+    uint16_t pad;
+    /* OUT parameters */
+    uint32_t version;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
+
 /*
  * Bitfield values for update_pin_status.flags.
  */
index f6f07aa35af5f8b0e91347d9707ffd08eccd577f..7cdfca24eafbcc4bc9c1a95f192aaaed6aee5a81 100644 (file)
@@ -87,4 +87,7 @@ struct xenstore_domain_interface {
     XENSTORE_RING_IDX rsp_cons, rsp_prod;
 };
 
+/* Violating this is very bad.  See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
 #endif /* _XS_WIRE_H */
index 6a6e91449347078a3ac2c3d66a10578e19e465fd..a890804945e3eda793ebe5e7b78a5d80fab974c0 100644 (file)
@@ -523,6 +523,8 @@ struct tmem_op {
        } u;
 };
 
+DEFINE_GUEST_HANDLE(u64);
+
 #else /* __ASSEMBLY__ */
 
 /* In assembly code we cannot use C numeric constant suffixes. */
index b1b6676c1c438485ad48b04db6f8d87f2ed3c432..e8c599b237c268de42faf4f26daf61dc8490d320 100644 (file)
@@ -85,8 +85,6 @@ struct xenbus_device_id
 
 /* A xenbus driver. */
 struct xenbus_driver {
-       char *name;
-       struct module *owner;
        const struct xenbus_device_id *ids;
        int (*probe)(struct xenbus_device *dev,
                     const struct xenbus_device_id *id);
@@ -101,31 +99,20 @@ struct xenbus_driver {
        int (*is_ready)(struct xenbus_device *dev);
 };
 
-static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-{
-       return container_of(drv, struct xenbus_driver, driver);
+#define DEFINE_XENBUS_DRIVER(var, drvname, methods...)         \
+struct xenbus_driver var ## _driver = {                                \
+       .driver.name = drvname + 0 ?: var ## _ids->devicetype,  \
+       .driver.owner = THIS_MODULE,                            \
+       .ids = var ## _ids, ## methods                          \
 }
 
-int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
-                                           struct module *owner,
-                                           const char *mod_name);
-
-static inline int __must_check
-xenbus_register_frontend(struct xenbus_driver *drv)
+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
 {
-       WARN_ON(drv->owner != THIS_MODULE);
-       return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+       return container_of(drv, struct xenbus_driver, driver);
 }
 
-int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
-                                          struct module *owner,
-                                          const char *mod_name);
-static inline int __must_check
-xenbus_register_backend(struct xenbus_driver *drv)
-{
-       WARN_ON(drv->owner != THIS_MODULE);
-       return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
-}
+int __must_check xenbus_register_frontend(struct xenbus_driver *);
+int __must_check xenbus_register_backend(struct xenbus_driver *);
 
 void xenbus_unregister_driver(struct xenbus_driver *drv);
 
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
new file mode 100644 (file)
index 0000000..ac5f0fe
--- /dev/null
@@ -0,0 +1,41 @@
+/******************************************************************************
+ * evtchn.h
+ *
+ * Interface to /dev/xen/xenbus_backend.
+ *
+ * Copyright (c) 2011 Bastian Blank <waldi@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_XEN_XENBUS_DEV_H__
+#define __LINUX_XEN_XENBUS_DEV_H__
+
+#include <linux/ioctl.h>
+
+#define IOCTL_XENBUS_BACKEND_EVTCHN                    \
+       _IOC(_IOC_NONE, 'B', 0, 0)
+
+#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
index b2eee02e0f83fd720f38cf83a2b5e58ea27997b1..2974c8b3b351b4a15b38acca4478e4032fece9fa 100644 (file)
@@ -400,15 +400,42 @@ out:
 }
  
 #ifdef CONFIG_ROOT_NFS
+
+#define NFSROOT_TIMEOUT_MIN    5
+#define NFSROOT_TIMEOUT_MAX    30
+#define NFSROOT_RETRY_MAX      5
+
 static int __init mount_nfs_root(void)
 {
        char *root_dev, *root_data;
+       unsigned int timeout;
+       int try, err;
 
-       if (nfs_root_data(&root_dev, &root_data) != 0)
-               return 0;
-       if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
+       err = nfs_root_data(&root_dev, &root_data);
+       if (err != 0)
                return 0;
-       return 1;
+
+       /*
+        * The server or network may not be ready, so try several
+        * times.  Stop after a few tries in case the client wants
+        * to fall back to other boot methods.
+        */
+       timeout = NFSROOT_TIMEOUT_MIN;
+       for (try = 1; ; try++) {
+               err = do_mount_root(root_dev, "nfs",
+                                       root_mountflags, root_data);
+               if (err == 0)
+                       return 1;
+               if (try > NFSROOT_RETRY_MAX)
+                       break;
+
+               /* Wait, in case the server refused us immediately */
+               ssleep(timeout);
+               timeout <<= 1;
+               if (timeout > NFSROOT_TIMEOUT_MAX)
+                       timeout = NFSROOT_TIMEOUT_MAX;
+       }
+       return 0;
 }
 #endif
 
index 9a142a290749f2af819c47ac487f2192c6502675..9b7c8ab7d75cad27e92272c0a0c81927c9fbaa03 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/nsproxy.h>
 #include <linux/pid.h>
 #include <linux/ipc_namespace.h>
+#include <linux/user_namespace.h>
 #include <linux/slab.h>
 
 #include <net/sock.h>
@@ -542,9 +543,13 @@ static void __do_notify(struct mqueue_inode_info *info)
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
+                       /* map current pid/uid into info->owner's namespaces */
+                       rcu_read_lock();
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
-                       sig_i.si_uid = current_uid();
+                       sig_i.si_uid = user_ns_map_uid(info->user->user_ns,
+                                               current_cred(), current_uid());
+                       rcu_read_unlock();
 
                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
index d9eab2e4b430a08a33a13e93f31a6d37a5d15794..94ed6e20bb532840e69070f3d623568e17cbe0c1 100644 (file)
@@ -51,6 +51,7 @@
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/oom.h>
+#include <linux/writeback.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1035,6 +1036,8 @@ NORET_TYPE void do_exit(long code)
        validate_creds_for_do_exit(tsk);
 
        preempt_disable();
+       if (tsk->nr_dirtied)
+               __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
index b00711ce7c13222715f08b83a047afdcb1a5d02a..443f5125f11e39435929072e3abbd19d7a408ea9 100644 (file)
@@ -76,6 +76,9 @@
 
 #include <trace/events/sched.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/task.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -1291,6 +1294,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        p->nr_dirtied = 0;
        p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+       p->dirty_paused_when = 0;
 
        /*
         * Ok, make it visible to the rest of the system.
@@ -1370,6 +1374,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (clone_flags & CLONE_THREAD)
                threadgroup_change_end(current);
        perf_event_fork(p);
+
+       trace_task_newtask(p, clone_flags);
+
        return p;
 
 bad_fork_free_pid:
index 30c3c770813275ed7c9f9a47ca47275a00fc5fbf..01d3b70fc98a346c3bf619016f12f1a7ed5d897a 100644 (file)
@@ -71,6 +71,7 @@ void jump_label_inc(struct jump_label_key *key)
        atomic_inc(&key->enabled);
        jump_label_unlock();
 }
+EXPORT_SYMBOL_GPL(jump_label_inc);
 
 static void __jump_label_dec(struct jump_label_key *key,
                unsigned long rate_limit, struct delayed_work *work)
@@ -86,6 +87,7 @@ static void __jump_label_dec(struct jump_label_key *key,
 
        jump_label_unlock();
 }
+EXPORT_SYMBOL_GPL(jump_label_dec);
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
index cbe2c14413927c665f25d778fb03fde84af73421..1cf88900ec4fdc162b6ad1250d2c4d13d3895458 100644 (file)
@@ -858,6 +858,9 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
            PageReserved(page))
                return NULL;
 
+       if (page_is_guard(page))
+               return NULL;
+
        return page;
 }
 
@@ -920,6 +923,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;
 
+       if (page_is_guard(page))
+               return NULL;
+
        return page;
 }
 
index bb0efa5705ed3295bee197ae4dcbae8cfb5673f1..c73c4284160e1edd642a5fe2bd1d0067dca77539 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
+#include <linux/user_namespace.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
 
@@ -1019,6 +1020,34 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
+/*
+ * map the uid in struct cred into user namespace *ns
+ */
+static inline uid_t map_cred_ns(const struct cred *cred,
+                               struct user_namespace *ns)
+{
+       return user_ns_map_uid(ns, cred, cred->uid);
+}
+
+#ifdef CONFIG_USER_NS
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       if (current_user_ns() == task_cred_xxx(t, user_ns))
+               return;
+
+       if (SI_FROMKERNEL(info))
+               return;
+
+       info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
+                                       current_cred(), info->si_uid);
+}
+#else
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       return;
+}
+#endif
+
 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
 {
@@ -1088,6 +1117,9 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                                q->info.si_pid = 0;
                        break;
                }
+
+               userns_fixup_signal_uid(&q->info, t);
+
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
@@ -1626,7 +1658,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_uid = map_cred_ns(__task_cred(tsk),
+                       task_cred_xxx(tsk->parent, user_ns));
        rcu_read_unlock();
 
        info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
@@ -1709,7 +1742,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_uid = map_cred_ns(__task_cred(tsk),
+                       task_cred_xxx(parent, user_ns));
        rcu_read_unlock();
 
        info.si_utime = cputime_to_clock_t(tsk->utime);
@@ -2125,8 +2159,11 @@ static int ptrace_signal(int signr, siginfo_t *info,
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
+               rcu_read_lock();
                info->si_pid = task_pid_vnr(current->parent);
-               info->si_uid = task_uid(current->parent);
+               info->si_uid = map_cred_ns(__task_cred(current->parent),
+                               current_user_ns());
+               rcu_read_unlock();
        }
 
        /* If the (new) signal is now blocked, requeue it.  */
@@ -2318,6 +2355,27 @@ relock:
        return signr;
 }
 
+/**
+ * block_sigmask - add @ka's signal mask to current->blocked
+ * @ka: action for @signr
+ * @signr: signal that has been successfully delivered
+ *
+ * This function should be called when a signal has succesfully been
+ * delivered. It adds the mask of signals for @ka to current->blocked
+ * so that they are blocked during the execution of the signal
+ * handler. In addition, @signr will be blocked unless %SA_NODEFER is
+ * set in @ka->sa.sa_flags.
+ */
+void block_sigmask(struct k_sigaction *ka, int signr)
+{
+       sigset_t blocked;
+
+       sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+       if (!(ka->sa.sa_flags & SA_NODEFER))
+               sigaddset(&blocked, signr);
+       set_current_blocked(&blocked);
+}
+
 /*
  * It could be that complete_signal() picked us to notify about the
  * group-wide signal. Other threads should be notified now to take
index 42fa9ad0a810482494b5782f4ba9a23f4aa32b86..bec7b5b53e03db1d443a2f221255fd22344f4de4 100644 (file)
@@ -242,10 +242,10 @@ struct workqueue_struct {
 
        int                     nr_drainers;    /* W: drain in progress */
        int                     saved_max_active; /* W: saved cwq max_active */
-       const char              *name;          /* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
+       char                    name[];         /* I: workqueue name */
 };
 
 struct workqueue_struct *system_wq __read_mostly;
@@ -2954,14 +2954,29 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
        return clamp_val(max_active, 1, lim);
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                                               unsigned int flags,
                                               int max_active,
                                               struct lock_class_key *key,
-                                              const char *lock_name)
+                                              const char *lock_name, ...)
 {
+       va_list args, args1;
        struct workqueue_struct *wq;
        unsigned int cpu;
+       size_t namelen;
+
+       /* determine namelen, allocate wq and format name */
+       va_start(args, lock_name);
+       va_copy(args1, args);
+       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+
+       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+       if (!wq)
+               goto err;
+
+       vsnprintf(wq->name, namelen, fmt, args1);
+       va_end(args);
+       va_end(args1);
 
        /*
         * Workqueues which may be used during memory reclaim should
@@ -2978,12 +2993,9 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                flags |= WQ_HIGHPRI;
 
        max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, flags, name);
-
-       wq = kzalloc(sizeof(*wq), GFP_KERNEL);
-       if (!wq)
-               goto err;
+       max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
+       /* init wq */
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
@@ -2991,7 +3003,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
-       wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
@@ -3020,7 +3031,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
+               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+                                              wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
 
index f34be6417d712b2cf524de7e0897d1496520f8a6..7f6b8bca8c25b51bd0f528a8feb136d703c422ae 100644 (file)
@@ -19,6 +19,13 @@ config RATIONAL
 config GENERIC_FIND_FIRST_BIT
        bool
 
+config GENERIC_PCI_IOMAP
+       bool
+
+config GENERIC_IOMAP
+       bool
+       select GENERIC_PCI_IOMAP
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index c0ffaaff653474ad553ceec7ff26dfa6634b0792..884ed376164d2ea688d7251fa503a4e86db3ca61 100644 (file)
@@ -33,6 +33,7 @@ endif
 
 lib-$(CONFIG_HOTPLUG) += kobject_uevent.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
index 2a34392bcecc3680dfb10fbbc7ae126792aa6713..e5ec1e9c1aa52cc08c710a4dcc4c1815cad9c67b 100644 (file)
@@ -357,6 +357,7 @@ miss:
        }
        return NULL;
 }
+EXPORT_SYMBOL_GPL(btree_get_prev);
 
 static int getpos(struct btree_geo *geo, unsigned long *node,
                unsigned long *key)
index a6e633a48cea887fbba14b7c1cbab84bd485c67b..4b35d2b4437cc76b3b75b45ae2d21ac6076f7809 100644 (file)
@@ -51,20 +51,21 @@ static inline u32
 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
-#  define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
-#  define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
-               tab[2][(crc >> 8) & 255] ^ \
-               tab[1][(crc >> 16) & 255] ^ \
-               tab[0][(crc >> 24) & 255]
+#  define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
+#  define DO_CRC4 crc = t3[(crc) & 255] ^ \
+               t2[(crc >> 8) & 255] ^ \
+               t1[(crc >> 16) & 255] ^ \
+               t0[(crc >> 24) & 255]
 # else
-#  define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-#  define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
-               tab[1][(crc >> 8) & 255] ^ \
-               tab[2][(crc >> 16) & 255] ^ \
-               tab[3][(crc >> 24) & 255]
+#  define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC4 crc = t0[(crc) & 255] ^ \
+               t1[(crc >> 8) & 255] ^  \
+               t2[(crc >> 16) & 255] ^ \
+               t3[(crc >> 24) & 255]
 # endif
        const u32 *b;
        size_t    rem_len;
+       const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
 
        /* Align it */
        if (unlikely((long)buf & 3 && len)) {
index 5dbcb4b2d864b244d97d289fb7c0f7bc18651ea0..ada922a808e6d328983fa5406284eacf3eb93cf4 100644 (file)
@@ -242,45 +242,11 @@ EXPORT_SYMBOL(ioport_unmap);
 #endif /* CONFIG_HAS_IOPORT */
 
 #ifdef CONFIG_PCI
-/**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       /* What? */
-       return NULL;
-}
-
+/* Hide the details if this is a MMIO or PIO address space and just do what
+ * you expect in the correct way. */
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        IO_COND(addr, /* nothing */, iounmap(addr));
 }
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
 #endif /* CONFIG_PCI */
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
new file mode 100644 (file)
index 0000000..4b0fdc2
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+#ifdef CONFIG_PCI
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details if this is a MMIO or PIO address space and will just do what
+ * you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ * */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
+       unsigned long flags = pci_resource_flags(dev, bar);
+
+       if (!len || !start)
+               return NULL;
+       if (maxlen && len > maxlen)
+               len = maxlen;
+       if (flags & IORESOURCE_IO)
+               return ioport_map(start, len);
+       if (flags & IORESOURCE_MEM) {
+               if (flags & IORESOURCE_CACHEABLE)
+                       return ioremap(start, len);
+               return ioremap_nocache(start, len);
+       }
+       /* What? */
+       return NULL;
+}
+
+EXPORT_SYMBOL(pci_iomap);
+#endif /* CONFIG_PCI */
index 99093b396145957d52b5e88a183ee1eb9cedeb60..058935ef39752e7f64e4a37baedba75fce76e4b8 100644 (file)
@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-unsigned long swioltb_nr_tbl(void)
+unsigned long swiotlb_nr_tbl(void)
 {
        return io_tlb_nslabs;
 }
-
+EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
                free_bootmem_late(__pa(io_tlb_start),
                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
+       io_tlb_nslabs = 0;
 }
 
 static int is_swiotlb_buffer(phys_addr_t paddr)
index 8b1a477162dc07242e7a461951bca291cff3dde7..4b2443254de260d2e05cdecb8d3eea903d91734f 100644 (file)
@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC
        depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
        depends on !KMEMCHECK
        select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS
 config PAGE_POISONING
        bool
        select WANT_PAGE_DEBUG_FLAGS
+
+config PAGE_GUARD
+       bool
+       select WANT_PAGE_DEBUG_FLAGS
index 1a77012ecdb3c56a92d53ef5b779c9648d54c6a3..668e94df8cf23ab1fa68ab54ff0cd8542a4c460c 100644 (file)
@@ -56,7 +56,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
 
 static unsigned long __init bootmap_bytes(unsigned long pages)
 {
-       unsigned long bytes = (pages + 7) / 8;
+       unsigned long bytes = DIV_ROUND_UP(pages, 8);
 
        return ALIGN(bytes, sizeof(long));
 }
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-       int aligned;
        struct page *page;
        unsigned long start, end, pages, count = 0;
 
@@ -181,14 +180,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;
 
-       /*
-        * If the start is aligned to the machines wordsize, we might
-        * be able to free pages in bulks of that order.
-        */
-       aligned = !(start & (BITS_PER_LONG - 1));
-
-       bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
-               bdata - bootmem_node_data, start, end, aligned);
+       bdebug("nid=%td start=%lx end=%lx\n",
+               bdata - bootmem_node_data, start, end);
 
        while (start < end) {
                unsigned long *map, idx, vec;
@@ -196,12 +189,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                vec = ~map[idx / BITS_PER_LONG];
-
-               if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+               /*
+                * If we have a properly aligned and fully unreserved
+                * BITS_PER_LONG block of pages in front of us, free
+                * it in one go.
+                */
+               if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);
 
                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
+                       start += BITS_PER_LONG;
                } else {
                        unsigned long off = 0;
 
@@ -214,8 +212,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                                vec >>= 1;
                                off++;
                        }
+                       start = ALIGN(start + 1, BITS_PER_LONG);
                }
-               start += BITS_PER_LONG;
        }
 
        page = virt_to_page(bdata->node_bootmem_map);
index 1253d7ac332b5031515b5a2f3e9ba4bb4956a27f..e6670c34eb4950b8953eeedbe02fe52f29ec84bf 100644 (file)
@@ -365,8 +365,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                nr_isolated++;
 
                /* Avoid isolating too much */
-               if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+               if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+                       ++low_pfn;
                        break;
+               }
        }
 
        acct_isolated(zone, cc);
index 8d723c9e8b75b316041ea564f5e243b79a94e2b7..469491e0af79fed994d48fd4ada817f8e50c6d6a 100644 (file)
@@ -117,7 +117,8 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
                break;
        case POSIX_FADV_DONTNEED:
                if (!bdi_write_congested(mapping->backing_dev_info))
-                       filemap_flush(mapping);
+                       __filemap_fdatawrite_range(mapping, offset, endbyte,
+                                                  WB_SYNC_NONE);
 
                /* First and last FULL page! */
                start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
index a0701e6eec107ae5247f41885e62a73efb4b4bdc..c4ee2e918bea1c0aa140fde22d20164ba8eb357a 100644 (file)
@@ -2351,8 +2351,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                        pgoff_t index, unsigned flags)
 {
        int status;
+       gfp_t gfp_mask;
        struct page *page;
        gfp_t gfp_notmask = 0;
+
+       gfp_mask = mapping_gfp_mask(mapping) | __GFP_WRITE;
        if (flags & AOP_FLAG_NOFS)
                gfp_notmask = __GFP_FS;
 repeat:
@@ -2360,7 +2363,7 @@ repeat:
        if (page)
                goto found;
 
-       page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+       page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
        if (!page)
                return NULL;
        status = add_to_page_cache_lru(page, mapping, index,
index 7acd12503f734bfcaff2c4fed244e594952962ea..ea8c3a4cd2ae8acdf52a7a4e862e277f2390c265 100644 (file)
@@ -800,7 +800,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 
        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
-               return NULL;
+               page = NULL;
        }
 
        spin_lock(&hugetlb_lock);
@@ -2315,8 +2315,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * from page cache lookup which is in HPAGE_SIZE units.
         */
        address = address & huge_page_mask(h);
-       pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
-               + (vma->vm_pgoff >> PAGE_SHIFT);
+       pgoff = vma_hugecache_offset(h, vma, address);
        mapping = (struct address_space *)page_private(page);
 
        /*
@@ -2349,6 +2348,9 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 
 /*
  * Hugetlb_cow() should be called with page lock of the original hugepage held.
+ * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+ * cannot race with other handlers or page migration.
+ * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte,
@@ -2408,7 +2410,14 @@ retry_avoidcopy:
                                BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(&mm->page_table_lock);
-                               goto retry_avoidcopy;
+                               ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+                               if (likely(pte_same(huge_ptep_get(ptep), pte)))
+                                       goto retry_avoidcopy;
+                               /*
+                                * race occurs while re-acquiring page_table_lock, and
+                                * our job is done.
+                                */
+                               return 0;
                        }
                        WARN_ON_ONCE(1);
                }
@@ -2630,6 +2639,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
+       address &= huge_page_mask(h);
+
        ptep = huge_pte_offset(mm, address);
        if (ptep) {
                entry = huge_ptep_get(ptep);
index c3fdbcb17658ce405131e5b0310e1857fd6558bc..e3d58f088466c3b798f9017b76351295313d2771 100644 (file)
@@ -1983,28 +1983,28 @@ struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
 }
 
 /* Slow path of a mempolicy comparison */
-int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
        if (!a || !b)
-               return 0;
+               return false;
        if (a->mode != b->mode)
-               return 0;
+               return false;
        if (a->flags != b->flags)
-               return 0;
+               return false;
        if (mpol_store_user_nodemask(a))
                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
-                       return 0;
+                       return false;
 
        switch (a->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
-               return nodes_equal(a->v.nodes, b->v.nodes);
+               return !!nodes_equal(a->v.nodes, b->v.nodes);
        case MPOL_PREFERRED:
                return a->v.preferred_node == b->v.preferred_node;
        default:
                BUG();
-               return 0;
+               return false;
        }
 }
 
index e73641b79bb5f08fac06f40380a47eaeb765294f..d9049811f3521bc690ff1535831b87dc3b682fe4 100644 (file)
@@ -27,7 +27,15 @@ static void *remove_element(mempool_t *pool)
        return pool->elements[--pool->curr_nr];
 }
 
-static void free_pool(mempool_t *pool)
+/**
+ * mempool_destroy - deallocate a memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * Free all reserved elements in @pool and @pool itself.  This function
+ * only sleeps if the free_fn() function sleeps.
+ */
+void mempool_destroy(mempool_t *pool)
 {
        while (pool->curr_nr) {
                void *element = remove_element(pool);
@@ -36,6 +44,7 @@ static void free_pool(mempool_t *pool)
        kfree(pool->elements);
        kfree(pool);
 }
+EXPORT_SYMBOL(mempool_destroy);
 
 /**
  * mempool_create - create a memory pool
@@ -86,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (unlikely(!element)) {
-                       free_pool(pool);
+                       mempool_destroy(pool);
                        return NULL;
                }
                add_element(pool, element);
@@ -171,23 +180,6 @@ out:
 }
 EXPORT_SYMBOL(mempool_resize);
 
-/**
- * mempool_destroy - deallocate a memory pool
- * @pool:      pointer to the memory pool which was allocated via
- *             mempool_create().
- *
- * this function only sleeps if the free_fn() function sleeps. The caller
- * has to guarantee that all elements have been returned to the pool (ie:
- * freed) prior to calling mempool_destroy().
- */
-void mempool_destroy(mempool_t *pool)
-{
-       /* Check for outstanding elements */
-       BUG_ON(pool->curr_nr != pool->min_nr);
-       free_pool(pool);
-}
-EXPORT_SYMBOL(mempool_destroy);
-
 /**
  * mempool_alloc - allocate an element from a specific memory pool
  * @pool:      pointer to the memory pool which was allocated via
@@ -224,28 +216,40 @@ repeat_alloc:
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
+               /* paired with rmb in mempool_free(), read comment there */
+               smp_wmb();
                return element;
        }
-       spin_unlock_irqrestore(&pool->lock, flags);
 
-       /* We must not sleep in the GFP_ATOMIC case */
-       if (!(gfp_mask & __GFP_WAIT))
+       /*
+        * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
+        * alloc failed with that and @pool was empty, retry immediately.
+        */
+       if (gfp_temp != gfp_mask) {
+               spin_unlock_irqrestore(&pool->lock, flags);
+               gfp_temp = gfp_mask;
+               goto repeat_alloc;
+       }
+
+       /* We must not sleep if !__GFP_WAIT */
+       if (!(gfp_mask & __GFP_WAIT)) {
+               spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
+       }
 
-       /* Now start performing page reclaim */
-       gfp_temp = gfp_mask;
+       /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-       smp_mb();
-       if (!pool->curr_nr) {
-               /*
-                * FIXME: this should be io_schedule().  The timeout is there
-                * as a workaround for some DM problems in 2.6.18.
-                */
-               io_schedule_timeout(5*HZ);
-       }
-       finish_wait(&pool->wait, &wait);
 
+       spin_unlock_irqrestore(&pool->lock, flags);
+
+       /*
+        * FIXME: this should be io_schedule().  The timeout is there as a
+        * workaround for some DM problems in 2.6.18.
+        */
+       io_schedule_timeout(5*HZ);
+
+       finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
 }
 EXPORT_SYMBOL(mempool_alloc);
@@ -265,7 +269,39 @@ void mempool_free(void *element, mempool_t *pool)
        if (unlikely(element == NULL))
                return;
 
-       smp_mb();
+       /*
+        * Paired with the wmb in mempool_alloc().  The preceding read is
+        * for @element and the following @pool->curr_nr.  This ensures
+        * that the visible value of @pool->curr_nr is from after the
+        * allocation of @element.  This is necessary for fringe cases
+        * where @element was passed to this task without going through
+        * barriers.
+        *
+        * For example, assume @p is %NULL at the beginning and one task
+        * performs "p = mempool_alloc(...);" while another task is doing
+        * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
+        * may end up using curr_nr value which is from before allocation
+        * of @p without the following rmb.
+        */
+       smp_rmb();
+
+       /*
+        * For correctness, we need a test which is guaranteed to trigger
+        * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
+        * without locking achieves that and refilling as soon as possible
+        * is desirable.
+        *
+        * Because curr_nr visible here is always a value after the
+        * allocation of @element, any task which decremented curr_nr below
+        * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
+        * incremented to min_nr afterwards.  If curr_nr gets incremented
+        * to min_nr after the allocation of @element, the elements
+        * allocated after that are subject to the same guarantee.
+        *
+        * Waiters happen iff curr_nr is 0 and the above guarantee also
+        * ensures that there will be frees which return elements to the
+        * pool waking up the waiters.
+        */
        if (pool->curr_nr < pool->min_nr) {
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
index 177aca424a069ac1ae1b44d48a8e6d992cd42a4d..89ea0854332ec3fdf53123bbf7fe5f1de94f6599 100644 (file)
@@ -39,8 +39,6 @@
 
 #include "internal.h"
 
-#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
-
 /*
  * migrate_prep() needs to be called before we start compiling a list of pages
  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
@@ -181,8 +179,6 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * Something used the pte of a page under migration. We need to
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
- *
- * This function is called from do_swap_page().
  */
 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
@@ -269,12 +265,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count);
        /*
-        * Drop cache reference from old page.
+        * Drop cache reference from old page by unfreezing
+        * to one less reference.
         * We know this isn't the last reference.
         */
-       __put_page(page);
+       page_unfreeze_refs(page, expected_count - 1);
 
        /*
         * If moved to a different zone then also account
@@ -334,9 +330,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count);
-
-       __put_page(page);
+       page_unfreeze_refs(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
        return 0;
index eae90af60ea62e066defeab345e5079f57a80dc2..3f758c7f4c815c2b0edf494526b2823b41fbc142 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1603,39 +1603,19 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ * Note: pprev is set to NULL when return value is NULL.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
-       struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node *rb_node;
-       if (!mm)
-               goto out;
-
-       /* Guard against addr being lower than the first VMA */
-       vma = mm->mmap;
-
-       /* Go through the RB tree quickly. */
-       rb_node = mm->mm_rb.rb_node;
-
-       while (rb_node) {
-               struct vm_area_struct *vma_tmp;
-               vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-               if (addr < vma_tmp->vm_end) {
-                       rb_node = rb_node->rb_left;
-               } else {
-                       prev = vma_tmp;
-                       if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-                               break;
-                       rb_node = rb_node->rb_right;
-               }
-       }
+       struct vm_area_struct *vma;
 
-out:
-       *pprev = prev;
-       return prev ? prev->vm_next : vma;
+       vma = find_vma(mm, addr);
+       *pprev = vma ? vma->vm_prev : NULL;
+       return vma;
 }
 
 /*
@@ -2322,13 +2302,16 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        struct vm_area_struct *new_vma, *prev;
        struct rb_node **rb_link, *rb_parent;
        struct mempolicy *pol;
+       bool faulted_in_anon_vma = true;
 
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
         */
-       if (!vma->vm_file && !vma->anon_vma)
+       if (unlikely(!vma->vm_file && !vma->anon_vma)) {
                pgoff = addr >> PAGE_SHIFT;
+               faulted_in_anon_vma = false;
+       }
 
        find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
@@ -2337,9 +2320,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                /*
                 * Source vma may have been merged into new_vma
                 */
-               if (vma_start >= new_vma->vm_start &&
-                   vma_start < new_vma->vm_end)
+               if (unlikely(vma_start >= new_vma->vm_start &&
+                            vma_start < new_vma->vm_end)) {
+                       /*
+                        * The only way we can get a vma_merge with
+                        * self during an mremap is if the vma hasn't
+                        * been faulted in yet and we were allowed to
+                        * reset the dst vma->vm_pgoff to the
+                        * destination address of the mremap to allow
+                        * the merge to happen. mremap must change the
+                        * vm_pgoff linearity between src and dst vmas
+                        * (in turn preventing a vma_merge) to be
+                        * safe. It is only safe to keep the vm_pgoff
+                        * linear if there are no pages mapped yet.
+                        */
+                       VM_BUG_ON(faulted_in_anon_vma);
                        *vmap = new_vma;
+               } else
+                       anon_vma_moveto_tail(new_vma);
        } else {
                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
index d6959cb4df58f1d694c179898553bf7e3150cc49..87bb8393e7d238115a450139d24c090347c215a7 100644 (file)
@@ -220,6 +220,15 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
+               /*
+                * Before moving the page tables from the new vma to
+                * the old vma, we need to be sure the old vma is
+                * queued after new vma in the same_anon_vma list to
+                * prevent SMP races with rmap_walk (that could lead
+                * rmap_walk to miss some page table).
+                */
+               anon_vma_moveto_tail(vma);
+
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
index eeb27e27dce3d9fe6e745812d79362e099424883..7c122faa05c5ba07fc11df6f5149909680c14f3c 100644 (file)
 #include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/freezer.h>
+#include <linux/ftrace.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/oom.h>
 
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
@@ -55,6 +59,7 @@ void compare_swap_oom_score_adj(int old_val, int new_val)
        spin_lock_irq(&sighand->siglock);
        if (current->signal->oom_score_adj == old_val)
                current->signal->oom_score_adj = new_val;
+       trace_oom_score_adj_update(current);
        spin_unlock_irq(&sighand->siglock);
 }
 
@@ -74,6 +79,7 @@ int test_set_oom_score_adj(int new_val)
        spin_lock_irq(&sighand->siglock);
        old_val = current->signal->oom_score_adj;
        current->signal->oom_score_adj = new_val;
+       trace_oom_score_adj_update(current);
        spin_unlock_irq(&sighand->siglock);
 
        return old_val;
index 8616ef3025a44a80e59448f960620b858b7445e3..363ba7082ef59efab5e90d94184cbf593aeff7ea 100644 (file)
  */
 #define MAX_PAUSE              max(HZ/5, 1)
 
+/*
+ * Try to keep balance_dirty_pages() call intervals higher than this many pages
+ * by raising pause time to max_pause when it falls below this threshold.
+ */
+#define DIRTY_POLL_THRESH      (128 >> (PAGE_SHIFT - 10))
+
 /*
  * Estimate write bandwidth at 200ms intervals.
  */
@@ -129,6 +135,191 @@ unsigned long global_dirty_limit;
  */
 static struct prop_descriptor vm_completions;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around.  To avoid stressing page reclaim with lots of unreclaimable
+ * pages.  It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+
+/*
+ * In a memory zone, there is a certain amount of pages we consider
+ * available for the page cache, which is essentially the number of
+ * free and reclaimable pages, minus some zone reserves to protect
+ * lowmem and the ability to uphold the zone's watermarks without
+ * requiring writeback.
+ *
+ * This number of dirtyable pages is the base value of which the
+ * user-configurable dirty ratio is the effective number of pages that
+ * are allowed to be actually dirtied.  Per individual zone, or
+ * globally by using the sum of dirtyable pages over all zones.
+ *
+ * Because the user is allowed to specify the dirty limit globally as
+ * absolute number of bytes, calculating the per-zone dirty limit can
+ * require translating the configured limit into a percentage of
+ * global dirtyable memory first.
+ */
+
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+       int node;
+       unsigned long x = 0;
+
+       for_each_node_state(node, N_HIGH_MEMORY) {
+               struct zone *z =
+                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+               x += zone_page_state(z, NR_FREE_PAGES) +
+                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+       }
+       /*
+        * Make sure that the number of highmem pages is never larger
+        * than the number of the total dirtyable memory. This can only
+        * occur in very strange VM situations but we want to make sure
+        * that this does not occur.
+        */
+       return min(x, total);
+#else
+       return 0;
+#endif
+}
+
+/**
+ * global_dirtyable_memory - number of globally dirtyable pages
+ *
+ * Returns the global number of pages potentially available for dirty
+ * page cache.  This is the base value for the global dirty limits.
+ */
+unsigned long global_dirtyable_memory(void)
+{
+       unsigned long x;
+
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+           dirty_balance_reserve;
+
+       if (!vm_highmem_is_dirtyable)
+               x -= highmem_dirtyable_memory(x);
+
+       return x + 1;   /* Ensure that we never return 0 */
+}
+
+/*
+ * global_dirty_limits - background-writeback and dirty-throttling thresholds
+ *
+ * Calculate the dirty thresholds based on sysctl parameters
+ * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
+ * - vm.dirty_ratio             or  vm.dirty_bytes
+ * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
+ * real-time tasks.
+ */
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
+{
+       unsigned long background;
+       unsigned long dirty;
+       unsigned long uninitialized_var(available_memory);
+       struct task_struct *tsk;
+
+       if (!vm_dirty_bytes || !dirty_background_bytes)
+               available_memory = global_dirtyable_memory();
+
+       if (vm_dirty_bytes)
+               dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
+       else
+               dirty = (vm_dirty_ratio * available_memory) / 100;
+
+       if (dirty_background_bytes)
+               background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
+       else
+               background = (dirty_background_ratio * available_memory) / 100;
+
+       if (background >= dirty)
+               background = dirty / 2;
+       tsk = current;
+       if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+               background += background / 4;
+               dirty += dirty / 4;
+       }
+       *pbackground = background;
+       *pdirty = dirty;
+       trace_global_dirty_state(background, dirty);
+}
+
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+       /*
+        * The effective global number of dirtyable pages may exclude
+        * highmem as a big-picture measure to keep the ratio between
+        * dirty memory and lowmem reasonable.
+        *
+        * But this function is purely about the individual zone and a
+        * highmem zone can hold its share of dirty pages, so we don't
+        * care about vm_highmem_is_dirtyable here.
+        */
+       return zone_page_state(zone, NR_FREE_PAGES) +
+              zone_reclaimable_pages(zone) -
+              zone->dirty_balance_reserve;
+}
+
+/**
+ * zone_dirty_limit - maximum number of dirty pages allowed in a zone
+ * @zone: the zone
+ *
+ * Returns the maximum number of dirty pages allowed in a zone, based
+ * on the zone's dirtyable memory.
+ */
+static unsigned long zone_dirty_limit(struct zone *zone)
+{
+       unsigned long zone_memory = zone_dirtyable_memory(zone);
+       struct task_struct *tsk = current;
+       unsigned long dirty;
+
+       if (vm_dirty_bytes)
+               dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
+                       zone_memory / global_dirtyable_memory();
+       else
+               dirty = vm_dirty_ratio * zone_memory / 100;
+
+       if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+               dirty += dirty / 4;
+
+       return dirty;
+}
+
+/**
+ * zone_dirty_ok - tells whether a zone is within its dirty limits
+ * @zone: the zone to check
+ *
+ * Returns %true when the dirty pages in @zone are within the zone's
+ * dirty limit, %false if the limit is exceeded.
+ */
+bool zone_dirty_ok(struct zone *zone)
+{
+       unsigned long limit = zone_dirty_limit(zone);
+
+       return zone_page_state(zone, NR_FILE_DIRTY) +
+              zone_page_state(zone, NR_UNSTABLE_NFS) +
+              zone_page_state(zone, NR_WRITEBACK) <= limit;
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -141,7 +332,7 @@ static int calc_period_shift(void)
        if (vm_dirty_bytes)
                dirty_total = vm_dirty_bytes / PAGE_SIZE;
        else
-               dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
+               dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
                                100;
        return 2 + ilog2(dirty_total - 1);
 }
@@ -196,7 +387,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -291,67 +481,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around.  To avoid stressing page reclaim with lots of unreclaimable
- * pages.  It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-       int node;
-       unsigned long x = 0;
-
-       for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z);
-       }
-       /*
-        * Make sure that the number of highmem pages is never larger
-        * than the number of the total dirtyable memory. This can only
-        * occur in very strange VM situations but we want to make sure
-        * that this does not occur.
-        */
-       return min(x, total);
-#else
-       return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the numebr of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-       unsigned long x;
-
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-       if (!vm_highmem_is_dirtyable)
-               x -= highmem_dirtyable_memory(x);
-
-       return x + 1;   /* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
                                           unsigned long bg_thresh)
 {
@@ -363,47 +492,6 @@ static unsigned long hard_dirty_limit(unsigned long thresh)
        return max(thresh, global_dirty_limit);
 }
 
-/*
- * global_dirty_limits - background-writeback and dirty-throttling thresholds
- *
- * Calculate the dirty thresholds based on sysctl parameters
- * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
- * - vm.dirty_ratio             or  vm.dirty_bytes
- * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * real-time tasks.
- */
-void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
-{
-       unsigned long background;
-       unsigned long dirty;
-       unsigned long uninitialized_var(available_memory);
-       struct task_struct *tsk;
-
-       if (!vm_dirty_bytes || !dirty_background_bytes)
-               available_memory = determine_dirtyable_memory();
-
-       if (vm_dirty_bytes)
-               dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-       else
-               dirty = (vm_dirty_ratio * available_memory) / 100;
-
-       if (dirty_background_bytes)
-               background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
-       else
-               background = (dirty_background_ratio * available_memory) / 100;
-
-       if (background >= dirty)
-               background = dirty / 2;
-       tsk = current;
-       if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
-               background += background / 4;
-               dirty += dirty / 4;
-       }
-       *pbackground = background;
-       *pdirty = dirty;
-       trace_global_dirty_state(background, dirty);
-}
-
 /**
  * bdi_dirty_limit - @bdi's share of dirty throttling threshold
  * @bdi: the backing_dev_info to query
@@ -816,6 +904,11 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
         */
        balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
                                           dirty_rate | 1);
+       /*
+        * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
+        */
+       if (unlikely(balanced_dirty_ratelimit > write_bw))
+               balanced_dirty_ratelimit = write_bw;
 
        /*
         * We could safely do this and return immediately:
@@ -962,40 +1055,98 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
        return 1;
 }
 
-static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
-                                  unsigned long bdi_dirty)
+static long bdi_max_pause(struct backing_dev_info *bdi,
+                         unsigned long bdi_dirty)
+{
+       long bw = bdi->avg_write_bandwidth;
+       long t;
+
+       /*
+        * Limit pause time for small memory systems. If sleeping for too long
+        * time, a small pool of dirty/writeback pages may go empty and disk go
+        * idle.
+        *
+        * 8 serves as the safety ratio.
+        */
+       t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
+       t++;
+
+       return min_t(long, t, MAX_PAUSE);
+}
+
+static long bdi_min_pause(struct backing_dev_info *bdi,
+                         long max_pause,
+                         unsigned long task_ratelimit,
+                         unsigned long dirty_ratelimit,
+                         int *nr_dirtied_pause)
 {
-       unsigned long bw = bdi->avg_write_bandwidth;
-       unsigned long hi = ilog2(bw);
-       unsigned long lo = ilog2(bdi->dirty_ratelimit);
-       unsigned long t;
+       long hi = ilog2(bdi->avg_write_bandwidth);
+       long lo = ilog2(bdi->dirty_ratelimit);
+       long t;         /* target pause */
+       long pause;     /* estimated next pause */
+       int pages;      /* target nr_dirtied_pause */
 
-       /* target for 20ms max pause on 1-dd case */
-       t = HZ / 50;
+       /* target for 10ms pause on 1-dd case */
+       t = max(1, HZ / 100);
 
        /*
         * Scale up pause time for concurrent dirtiers in order to reduce CPU
         * overheads.
         *
-        * (N * 20ms) on 2^N concurrent tasks.
+        * (N * 10ms) on 2^N concurrent tasks.
         */
        if (hi > lo)
-               t += (hi - lo) * (20 * HZ) / 1024;
+               t += (hi - lo) * (10 * HZ) / 1024;
 
        /*
-        * Limit pause time for small memory systems. If sleeping for too long
-        * time, a small pool of dirty/writeback pages may go empty and disk go
-        * idle.
+        * This is a bit convoluted. We try to base the next nr_dirtied_pause
+        * on the much more stable dirty_ratelimit. However the next pause time
+        * will be computed based on task_ratelimit and the two rate limits may
+        * depart considerably at some time. Especially if task_ratelimit goes
+        * below dirty_ratelimit/2 and the target pause is max_pause, the next
+        * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
+        * result task_ratelimit won't be executed faithfully, which could
+        * eventually bring down dirty_ratelimit.
         *
-        * 8 serves as the safety ratio.
+        * We apply two rules to fix it up:
+        * 1) try to estimate the next pause time and if necessary, use a lower
+        *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
+        *    nr_dirtied_pause will be "dancing" with task_ratelimit.
+        * 2) limit the target pause time to max_pause/2, so that the normal
+        *    small fluctuations of task_ratelimit won't trigger rule (1) and
+        *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
         */
-       t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+       t = min(t, 1 + max_pause / 2);
+       pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
 
        /*
-        * The pause time will be settled within range (max_pause/4, max_pause).
-        * Apply a minimal value of 4 to get a non-zero max_pause/4.
+        * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
+        * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
+        * When the 16 consecutive reads are often interrupted by some dirty
+        * throttling pause during the async writes, cfq will go into idles
+        * (deadline is fine). So push nr_dirtied_pause as high as possible
+        * until it reaches DIRTY_POLL_THRESH=32 pages.
         */
-       return clamp_val(t, 4, MAX_PAUSE);
+       if (pages < DIRTY_POLL_THRESH) {
+               t = max_pause;
+               pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
+               if (pages > DIRTY_POLL_THRESH) {
+                       pages = DIRTY_POLL_THRESH;
+                       t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
+               }
+       }
+
+       pause = HZ * pages / (task_ratelimit + 1);
+       if (pause > max_pause) {
+               t = max_pause;
+               pages = task_ratelimit * t / roundup_pow_of_two(HZ);
+       }
+
+       *nr_dirtied_pause = pages;
+       /*
+        * The minimal pause time will normally be half the target pause time.
+        */
+       return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
 }
 
 /*
@@ -1016,16 +1167,21 @@ static void balance_dirty_pages(struct address_space *mapping,
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
-       long pause = 0;
-       long uninitialized_var(max_pause);
+       long period;
+       long pause;
+       long max_pause;
+       long min_pause;
+       int nr_dirtied_pause;
        bool dirty_exceeded = false;
        unsigned long task_ratelimit;
-       unsigned long uninitialized_var(dirty_ratelimit);
+       unsigned long dirty_ratelimit;
        unsigned long pos_ratio;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long start_time = jiffies;
 
        for (;;) {
+               unsigned long now = jiffies;
+
                /*
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
@@ -1045,8 +1201,13 @@ static void balance_dirty_pages(struct address_space *mapping,
                 */
                freerun = dirty_freerun_ceiling(dirty_thresh,
                                                background_thresh);
-               if (nr_dirty <= freerun)
+               if (nr_dirty <= freerun) {
+                       current->dirty_paused_when = now;
+                       current->nr_dirtied = 0;
+                       current->nr_dirtied_pause =
+                               dirty_poll_interval(nr_dirty, dirty_thresh);
                        break;
+               }
 
                if (unlikely(!writeback_in_progress(bdi)))
                        bdi_start_background_writeback(bdi);
@@ -1086,7 +1247,7 @@ static void balance_dirty_pages(struct address_space *mapping,
                                    bdi_stat(bdi, BDI_WRITEBACK);
                }
 
-               dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+               dirty_exceeded = (bdi_dirty > bdi_thresh) &&
                                  (nr_dirty > dirty_thresh);
                if (dirty_exceeded && !bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
@@ -1095,20 +1256,34 @@ static void balance_dirty_pages(struct address_space *mapping,
                                     nr_dirty, bdi_thresh, bdi_dirty,
                                     start_time);
 
-               max_pause = bdi_max_pause(bdi, bdi_dirty);
-
                dirty_ratelimit = bdi->dirty_ratelimit;
                pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
                                               background_thresh, nr_dirty,
                                               bdi_thresh, bdi_dirty);
                task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
                                                        RATELIMIT_CALC_SHIFT;
+               max_pause = bdi_max_pause(bdi, bdi_dirty);
+               min_pause = bdi_min_pause(bdi, max_pause,
+                                         task_ratelimit, dirty_ratelimit,
+                                         &nr_dirtied_pause);
+
                if (unlikely(task_ratelimit == 0)) {
+                       period = max_pause;
                        pause = max_pause;
                        goto pause;
                }
-               pause = HZ * pages_dirtied / task_ratelimit;
-               if (unlikely(pause <= 0)) {
+               period = HZ * pages_dirtied / task_ratelimit;
+               pause = period;
+               if (current->dirty_paused_when)
+                       pause -= now - current->dirty_paused_when;
+               /*
+                * For less than 1s think time (ext3/4 may block the dirtier
+                * for up to 800ms from time to time on 1-HDD; so does xfs,
+                * however at much less frequency), try to compensate it in
+                * future periods by updating the virtual time; otherwise just
+                * do a reset, as it may be a light dirtier.
+                */
+               if (pause < min_pause) {
                        trace_balance_dirty_pages(bdi,
                                                  dirty_thresh,
                                                  background_thresh,
@@ -1118,12 +1293,24 @@ static void balance_dirty_pages(struct address_space *mapping,
                                                  dirty_ratelimit,
                                                  task_ratelimit,
                                                  pages_dirtied,
-                                                 pause,
+                                                 period,
+                                                 min(pause, 0L),
                                                  start_time);
-                       pause = 1; /* avoid resetting nr_dirtied_pause below */
+                       if (pause < -HZ) {
+                               current->dirty_paused_when = now;
+                               current->nr_dirtied = 0;
+                       } else if (period) {
+                               current->dirty_paused_when += period;
+                               current->nr_dirtied = 0;
+                       } else if (current->nr_dirtied_pause <= pages_dirtied)
+                               current->nr_dirtied_pause += pages_dirtied;
                        break;
                }
-               pause = min(pause, max_pause);
+               if (unlikely(pause > max_pause)) {
+                       /* for occasional dropped task_ratelimit */
+                       now += min(pause - max_pause, max_pause);
+                       pause = max_pause;
+               }
 
 pause:
                trace_balance_dirty_pages(bdi,
@@ -1135,11 +1322,16 @@ pause:
                                          dirty_ratelimit,
                                          task_ratelimit,
                                          pages_dirtied,
+                                         period,
                                          pause,
                                          start_time);
                __set_current_state(TASK_KILLABLE);
                io_schedule_timeout(pause);
 
+               current->dirty_paused_when = now + pause;
+               current->nr_dirtied = 0;
+               current->nr_dirtied_pause = nr_dirtied_pause;
+
                /*
                 * This is typically equal to (nr_dirty < dirty_thresh) and can
                 * also keep "1000+ dd on a slow USB stick" under control.
@@ -1167,23 +1359,6 @@ pause:
        if (!dirty_exceeded && bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;
 
-       current->nr_dirtied = 0;
-       if (pause == 0) { /* in freerun area */
-               current->nr_dirtied_pause =
-                               dirty_poll_interval(nr_dirty, dirty_thresh);
-       } else if (pause <= max_pause / 4 &&
-                  pages_dirtied >= current->nr_dirtied_pause) {
-               current->nr_dirtied_pause = clamp_val(
-                                       dirty_ratelimit * (max_pause / 2) / HZ,
-                                       pages_dirtied + pages_dirtied / 8,
-                                       pages_dirtied * 4);
-       } else if (pause >= max_pause) {
-               current->nr_dirtied_pause = 1 | clamp_val(
-                                       dirty_ratelimit * (max_pause / 2) / HZ,
-                                       pages_dirtied / 4,
-                                       pages_dirtied - pages_dirtied / 8);
-       }
-
        if (writeback_in_progress(bdi))
                return;
 
@@ -1214,6 +1389,22 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
+/*
+ * Normal tasks are throttled by
+ *     loop {
+ *             dirty tsk->nr_dirtied_pause pages;
+ *             take a snap in balance_dirty_pages();
+ *     }
+ * However there is a worst case. If every task exits immediately after dirtying
+ * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
+ * called to throttle the page dirties. The solution is to save the not yet
+ * throttled page dirties in dirty_throttle_leaks on task exit and charge them
+ * randomly into the running tasks. This works well for the above worst case,
+ * as the new task will pick up and accumulate the old task's leaked dirty
+ * count and eventually get throttled.
+ */
+DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -1242,8 +1433,6 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
        if (bdi->dirty_exceeded)
                ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
 
-       current->nr_dirtied += nr_pages_dirtied;
-
        preempt_disable();
        /*
         * This prevents one CPU to accumulate too many dirtied pages without
@@ -1254,12 +1443,20 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
        p =  &__get_cpu_var(bdp_ratelimits);
        if (unlikely(current->nr_dirtied >= ratelimit))
                *p = 0;
-       else {
-               *p += nr_pages_dirtied;
-               if (unlikely(*p >= ratelimit_pages)) {
-                       *p = 0;
-                       ratelimit = 0;
-               }
+       else if (unlikely(*p >= ratelimit_pages)) {
+               *p = 0;
+               ratelimit = 0;
+       }
+       /*
+        * Pick up the dirtied pages by the exited tasks. This avoids lots of
+        * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
+        * the dirty throttling and livelocking other long-running dirtiers.
+        */
+       p = &__get_cpu_var(dirty_throttle_leaks);
+       if (*p > 0 && current->nr_dirtied < ratelimit) {
+               nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
+               *p -= nr_pages_dirtied;
+               current->nr_dirtied += nr_pages_dirtied;
        }
        preempt_enable();
 
@@ -1741,6 +1938,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
                task_io_account_write(PAGE_CACHE_SIZE);
+               current->nr_dirtied++;
+               this_cpu_inc(bdp_ratelimits);
        }
 }
 EXPORT_SYMBOL(account_page_dirtied);
@@ -1800,6 +1999,24 @@ int __set_page_dirty_nobuffers(struct page *page)
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
 
+/*
+ * Call this whenever redirtying a page, to de-account the dirty counters
+ * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
+ * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
+ * systematic errors in balanced_dirty_ratelimit and the dirty pages position
+ * control.
+ */
+void account_page_redirty(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       if (mapping && mapping_cap_account_dirty(mapping)) {
+               current->nr_dirtied--;
+               dec_zone_page_state(page, NR_DIRTIED);
+               dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+       }
+}
+EXPORT_SYMBOL(account_page_redirty);
+
 /*
  * When a writepage implementation decides that it doesn't want to write this
  * page for some reason, it should redirty the locked page via
@@ -1808,6 +2025,7 @@ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
 {
        wbc->pages_skipped++;
+       account_page_redirty(page);
        return __set_page_dirty_nobuffers(page);
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
index 7990ca154d1b602408993f18c433831dab168d06..794e6715c22682efdf5dbf570e0434a0a5ef5eb0 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -96,6 +97,14 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory.  This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
@@ -127,6 +136,13 @@ void pm_restrict_gfp_mask(void)
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
 }
+
+bool pm_suspended_storage(void)
+{
+       if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+               return false;
+       return true;
+}
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -381,6 +397,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
                clear_highpage(page + i);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+unsigned int _debug_guardpage_minorder;
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+       unsigned long res;
+
+       if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
+               printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
+               return 0;
+       }
+       _debug_guardpage_minorder = res;
+       printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
+       return 0;
+}
+__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+
+static inline void set_page_guard_flag(struct page *page)
+{
+       __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+
+static inline void clear_page_guard_flag(struct page *page)
+{
+       __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline void set_page_guard_flag(struct page *page) { }
+static inline void clear_page_guard_flag(struct page *page) { }
+#endif
+
 static inline void set_page_order(struct page *page, int order)
 {
        set_page_private(page, order);
@@ -438,6 +485,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;
 
+       if (page_is_guard(buddy) && page_order(buddy) == order) {
+               VM_BUG_ON(page_count(buddy) != 0);
+               return 1;
+       }
+
        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
@@ -494,11 +546,19 @@ static inline void __free_one_page(struct page *page,
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;
-
-               /* Our buddy is free, merge with it and move up one order. */
-               list_del(&buddy->lru);
-               zone->free_area[order].nr_free--;
-               rmv_page_order(buddy);
+               /*
+                * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+                * merge with it and move up one order.
+                */
+               if (page_is_guard(buddy)) {
+                       clear_page_guard_flag(buddy);
+                       set_page_private(page, 0);
+                       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+               } else {
+                       list_del(&buddy->lru);
+                       zone->free_area[order].nr_free--;
+                       rmv_page_order(buddy);
+               }
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
@@ -632,7 +692,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
-       trace_mm_page_free_direct(page, order);
+       trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
 
        if (PageAnon(page))
@@ -670,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-       if (order == 0) {
-               __ClearPageReserved(page);
-               set_page_count(page, 0);
-               set_page_refcounted(page);
-               __free_page(page);
-       } else {
-               int loop;
-
-               prefetchw(page);
-               for (loop = 0; loop < (1 << order); loop++) {
-                       struct page *p = &page[loop];
+       unsigned int nr_pages = 1 << order;
+       unsigned int loop;
 
-                       if (loop + 1 < (1 << order))
-                               prefetchw(p + 1);
-                       __ClearPageReserved(p);
-                       set_page_count(p, 0);
-               }
+       prefetchw(page);
+       for (loop = 0; loop < nr_pages; loop++) {
+               struct page *p = &page[loop];
 
-               set_page_refcounted(page);
-               __free_pages(page, order);
+               if (loop + 1 < nr_pages)
+                       prefetchw(p + 1);
+               __ClearPageReserved(p);
+               set_page_count(p, 0);
        }
+
+       set_page_refcounted(page);
+       __free_pages(page, order);
 }
 
 
@@ -724,6 +775,23 @@ static inline void expand(struct zone *zone, struct page *page,
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+               if (high < debug_guardpage_minorder()) {
+                       /*
+                        * Mark as guard pages (or page), that will allow to
+                        * merge back to allocator when buddy will be freed.
+                        * Corresponding page table entries will not be touched,
+                        * pages will stay not present in virtual address space
+                        */
+                       INIT_LIST_HEAD(&page[size].lru);
+                       set_page_guard_flag(&page[size]);
+                       set_page_private(&page[size], high);
+                       /* Guard pages are not available for any usage */
+                       __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+                       continue;
+               }
+#endif
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
@@ -1188,6 +1256,19 @@ out:
        local_irq_restore(flags);
 }
 
+/*
+ * Free a list of 0-order pages
+ */
+void free_hot_cold_page_list(struct list_head *list, int cold)
+{
+       struct page *page, *next;
+
+       list_for_each_entry_safe(page, next, list, lru) {
+               trace_mm_page_free_batched(page, cold);
+               free_hot_cold_page(page, cold);
+       }
+}
+
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -1435,7 +1516,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
        long min = mark;
        int o;
 
-       free_pages -= (1 << order) + 1;
+       free_pages -= (1 << order) - 1;
        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
@@ -1645,6 +1726,35 @@ zonelist_scan:
                if ((alloc_flags & ALLOC_CPUSET) &&
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                continue;
+               /*
+                * When allocating a page cache page for writing, we
+                * want to get it from a zone that is within its dirty
+                * limit, such that no single zone holds more than its
+                * proportional share of globally allowed dirty pages.
+                * The dirty limits take into account the zone's
+                * lowmem reserves and high watermark so that kswapd
+                * should be able to balance it without having to
+                * write pages from its LRU list.
+                *
+                * This may look like it could increase pressure on
+                * lower zones by failing allocations in higher zones
+                * before they are full.  But the pages that do spill
+                * over are limited as the lower zones are protected
+                * by this very same mechanism.  It should not become
+                * a practical burden to them.
+                *
+                * XXX: For now, allow allocations to potentially
+                * exceed the per-zone dirty limit in the slowpath
+                * (ALLOC_WMARK_LOW unset) before going into reclaim,
+                * which is important when on a NUMA setup the allowed
+                * zones are together not big enough to reach the
+                * global limit.  The proper fix for these situations
+                * will require awareness of zones in the
+                * dirty-throttling and the flusher threads.
+                */
+               if ((alloc_flags & ALLOC_WMARK_LOW) &&
+                   (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+                       goto this_zone_full;
 
                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
@@ -1734,7 +1844,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 {
        unsigned int filter = SHOW_MEM_FILTER_NODES;
 
-       if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
+       if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+           debug_guardpage_minorder() > 0)
                return;
 
        /*
@@ -1773,12 +1884,25 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 
 static inline int
 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
+                               unsigned long did_some_progress,
                                unsigned long pages_reclaimed)
 {
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
                return 0;
 
+       /* Always retry if specifically requested */
+       if (gfp_mask & __GFP_NOFAIL)
+               return 1;
+
+       /*
+        * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
+        * making forward progress without invoking OOM. Suspend also disables
+        * storage devices so kswapd will not help. Bail if we are suspending.
+        */
+       if (!did_some_progress && pm_suspended_storage())
+               return 0;
+
        /*
         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
         * means __GFP_NOFAIL, but that may not be true in other
@@ -1797,13 +1921,6 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
                return 1;
 
-       /*
-        * Don't let big-order allocations loop unless the caller
-        * explicitly requests that.
-        */
-       if (gfp_mask & __GFP_NOFAIL)
-               return 1;
-
        return 0;
 }
 
@@ -2196,7 +2313,8 @@ rebalance:
 
        /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
-       if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
+       if (should_alloc_retry(gfp_mask, order, did_some_progress,
+                                               pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
                goto rebalance;
@@ -2306,16 +2424,6 @@ unsigned long get_zeroed_page(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(get_zeroed_page);
 
-void __pagevec_free(struct pagevec *pvec)
-{
-       int i = pagevec_count(pvec);
-
-       while (--i >= 0) {
-               trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
-               free_hot_cold_page(pvec->pages[i], pvec->cold);
-       }
-}
-
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
@@ -3385,25 +3493,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
                if (page_to_nid(page) != zone_to_nid(zone))
                        continue;
 
-               /* Blocks with reserved pages will never free, skip them. */
-               block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
-               if (pageblock_is_reserved(pfn, block_end_pfn))
-                       continue;
-
                block_migratetype = get_pageblock_migratetype(page);
 
-               /* If this block is reserved, account for it */
-               if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
-                       reserve--;
-                       continue;
-               }
+               /* Only test what is necessary when the reserves are not met */
+               if (reserve > 0) {
+                       /*
+                        * Blocks with reserved pages will never free, skip
+                        * them.
+                        */
+                       block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+                       if (pageblock_is_reserved(pfn, block_end_pfn))
+                               continue;
 
-               /* Suitable for reserving if this block is movable */
-               if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
-                       set_pageblock_migratetype(page, MIGRATE_RESERVE);
-                       move_freepages_block(zone, page, MIGRATE_RESERVE);
-                       reserve--;
-                       continue;
+                       /* If this block is reserved, account for it */
+                       if (block_migratetype == MIGRATE_RESERVE) {
+                               reserve--;
+                               continue;
+                       }
+
+                       /* Suitable for reserving if this block is movable */
+                       if (block_migratetype == MIGRATE_MOVABLE) {
+                               set_pageblock_migratetype(page,
+                                                       MIGRATE_RESERVE);
+                               move_freepages_block(zone, page,
+                                                       MIGRATE_RESERVE);
+                               reserve--;
+                               continue;
+                       }
                }
 
                /*
@@ -4734,8 +4850,19 @@ static void calculate_totalreserve_pages(void)
                        if (max > zone->present_pages)
                                max = zone->present_pages;
                        reserve_pages += max;
+                       /*
+                        * Lowmem reserves are not available to
+                        * GFP_HIGHUSER page cache allocations and
+                        * kswapd tries to balance zones to their high
+                        * watermark.  As a result, neither should be
+                        * regarded as dirtyable memory, to prevent a
+                        * situation where reclaim has to clean pages
+                        * in order to balance the zones.
+                        */
+                       zone->dirty_balance_reserve = max;
                }
        }
+       dirty_balance_reserve = reserve_pages;
        totalreserve_pages = reserve_pages;
 }
 
index a4fd3680038be499a47cf747d1ba657646e5d36c..a2e5ce1fa08142f60a0abba7ea6512fe6713555c 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -271,6 +271,51 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
        return -ENOMEM;
 }
 
+/*
+ * Some rmap walk that needs to find all ptes/hugepmds without false
+ * negatives (like migrate and split_huge_page) running concurrently
+ * with operations that copy or move pagetables (like mremap() and
+ * fork()) to be safe. They depend on the anon_vma "same_anon_vma"
+ * list to be in a certain order: the dst_vma must be placed after the
+ * src_vma in the list. This is always guaranteed by fork() but
+ * mremap() needs to call this function to enforce it in case the
+ * dst_vma isn't newly allocated and chained with the anon_vma_clone()
+ * function but just an extension of a pre-existing vma through
+ * vma_merge.
+ *
+ * NOTE: the same_anon_vma list can still be changed by other
+ * processes while mremap runs because mremap doesn't hold the
+ * anon_vma mutex to prevent modifications to the list while it
+ * runs. All we need to enforce is that the relative order of this
+ * process vmas isn't changing (we don't care about other vmas
+ * order). Each vma corresponds to an anon_vma_chain structure so
+ * there's no risk that other processes calling anon_vma_moveto_tail()
+ * and changing the same_anon_vma list under mremap() will screw with
+ * the relative order of this process vmas in the list, because
+ * they can't alter the order of any vma that belongs to this
+ * process. And there can't be another anon_vma_moveto_tail() running
+ * concurrently with mremap() coming from this process because we hold
+ * the mmap_sem for the whole mremap(). fork() ordering dependency
+ * also shouldn't be affected because fork() only cares that the
+ * parent vmas are placed in the list before the child vmas and
+ * anon_vma_moveto_tail() won't reorder vmas from either the fork()
+ * parent or child.
+ */
+void anon_vma_moveto_tail(struct vm_area_struct *dst)
+{
+       struct anon_vma_chain *pavc;
+       struct anon_vma *root = NULL;
+
+       list_for_each_entry_reverse(pavc, &dst->anon_vma_chain, same_vma) {
+               struct anon_vma *anon_vma = pavc->anon_vma;
+               VM_BUG_ON(pavc->vma != dst);
+               root = lock_anon_vma_root(root, anon_vma);
+               list_del(&pavc->same_anon_vma);
+               list_add_tail(&pavc->same_anon_vma, &anon_vma->head);
+       }
+       unlock_anon_vma_root(root);
+}
+
 /*
  * Attach vma to its own anon_vma, as well as to the anon_vmas that
  * the corresponding VMA in the parent process is attached to.
index 025f6ac515697354f171140cec6dfaed6eb4a7cb..d99acbf14e0179c766332b5f54f4f946b37a5d0f 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3654,6 +3654,9 @@ void __init kmem_cache_init(void)
        struct kmem_cache *temp_kmem_cache_node;
        unsigned long kmalloc_size;
 
+       if (debug_guardpage_minorder())
+               slub_max_order = 0;
+
        kmem_size = offsetof(struct kmem_cache, node) +
                                nr_node_ids * sizeof(struct kmem_cache_node *);
 
index a91caf754d9badb5f0b1f3c54b8e86be4723ff84..67a09a633a09baad606176f0899d94dbeb4d17d4 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -585,11 +585,10 @@ int lru_add_drain_all(void)
 void release_pages(struct page **pages, int nr, int cold)
 {
        int i;
-       struct pagevec pages_to_free;
+       LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);
 
-       pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
 
@@ -620,19 +619,12 @@ void release_pages(struct page **pages, int nr, int cold)
                        del_page_from_lru(zone, page);
                }
 
-               if (!pagevec_add(&pages_to_free, page)) {
-                       if (zone) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
-                               zone = NULL;
-                       }
-                       __pagevec_free(&pages_to_free);
-                       pagevec_reinit(&pages_to_free);
-               }
+               list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       pagevec_free(&pages_to_free);
+       free_hot_cold_page_list(&pages_to_free, cold);
 }
 EXPORT_SYMBOL(release_pages);
 
index b1cd120607230b0770c35e0823d6389782ae734a..9520592d4231e421ca15b160d8118f4cfa4a4d84 100644 (file)
@@ -667,10 +667,10 @@ int try_to_free_swap(struct page *page)
         * original page might be freed under memory pressure, then
         * later read back in from swap, now with the wrong data.
         *
-        * Hibernation clears bits from gfp_allowed_mask to prevent
-        * memory reclaim from writing to disk, so check that here.
+        * Hibernation suspends storage while it is writing the image
+        * to disk, so check that here.
         */
-       if (!(gfp_allowed_mask & __GFP_IO))
+       if (pm_suspended_storage())
                return 0;
 
        delete_from_swap_cache(page);
index 21fdf46ad5aac727111834d0955af07472daaec2..877ca046f43d0289ab47dd5471c78fbd16c29553 100644 (file)
@@ -256,7 +256,7 @@ struct vmap_area {
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
        struct list_head purge_list;    /* "lazy purge" list */
-       void *private;
+       struct vm_struct *vm;
        struct rcu_head rcu_head;
 };
 
@@ -1285,7 +1285,7 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
-       va->private = vm;
+       va->vm = vm;
        va->flags |= VM_VM_AREA;
 }
 
@@ -1408,7 +1408,7 @@ static struct vm_struct *find_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA)
-               return va->private;
+               return va->vm;
 
        return NULL;
 }
@@ -1427,7 +1427,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA) {
-               struct vm_struct *vm = va->private;
+               struct vm_struct *vm = va->vm;
 
                if (!(vm->flags & VM_UNLIST)) {
                        struct vm_struct *tmp, **p;
index 11adc890ce30bb5f09c3a53765ce0b57474a893a..26f4a8a4e0c75caebff02e582d8b297f4ff2b35d 100644 (file)
@@ -715,7 +715,13 @@ static enum page_references page_check_references(struct page *page,
                 */
                SetPageReferenced(page);
 
-               if (referenced_page)
+               if (referenced_page || referenced_ptes > 1)
+                       return PAGEREF_ACTIVATE;
+
+               /*
+                * Activate file-backed executable pages after first usage.
+                */
+               if (vm_flags & VM_EXEC)
                        return PAGEREF_ACTIVATE;
 
                return PAGEREF_KEEP;
@@ -728,24 +734,6 @@ static enum page_references page_check_references(struct page *page,
        return PAGEREF_RECLAIM;
 }
 
-static noinline_for_stack void free_page_list(struct list_head *free_pages)
-{
-       struct pagevec freed_pvec;
-       struct page *page, *tmp;
-
-       pagevec_init(&freed_pvec, 1);
-
-       list_for_each_entry_safe(page, tmp, free_pages, lru) {
-               list_del(&page->lru);
-               if (!pagevec_add(&freed_pvec, page)) {
-                       __pagevec_free(&freed_pvec);
-                       pagevec_reinit(&freed_pvec);
-               }
-       }
-
-       pagevec_free(&freed_pvec);
-}
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1009,7 +997,7 @@ keep_lumpy:
        if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
                zone_set_flag(zone, ZONE_CONGESTED);
 
-       free_page_list(&free_pages);
+       free_hot_cold_page_list(&free_pages, 1);
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
@@ -1178,14 +1166,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                         * anon page which don't already have a swap slot is
                         * pointless.
                         */
-                       if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+                       if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
                            !PageSwapCache(cursor_page))
                                break;
 
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
                                mem_cgroup_del_lru(cursor_page);
-                               nr_taken += hpage_nr_pages(page);
+                               nr_taken += hpage_nr_pages(cursor_page);
                                nr_lumpy_taken++;
                                if (PageDirty(cursor_page))
                                        nr_lumpy_dirty++;
@@ -2012,8 +2000,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
-                               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       if (nr_swap_pages > 0)
+               inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
@@ -3448,9 +3437,10 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
 static void warn_scan_unevictable_pages(void)
 {
        printk_once(KERN_WARNING
-                   "The scan_unevictable_pages sysctl/node-interface has been "
+                   "%s: The scan_unevictable_pages sysctl/node-interface has been "
                    "disabled for lack of a legitimate use case.  If you have "
-                   "one, please send an email to linux-mm@kvack.org.\n");
+                   "one, please send an email to linux-mm@kvack.org.\n",
+                   current->comm);
 }
 
 /*
index 854ca7a911c427f753ebe2b5d93a71a1c10df041..776618cd2be5c122fecaf31934f06d74fff2b450 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -81,15 +83,15 @@ static int get_protocol_version(char *s)
 
        if (!strcmp(s, "9p2000")) {
                version = p9_proto_legacy;
-               P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
+               p9_debug(P9_DEBUG_9P, "Protocol version: Legacy\n");
        } else if (!strcmp(s, "9p2000.u")) {
                version = p9_proto_2000u;
-               P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
+               p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
        } else if (!strcmp(s, "9p2000.L")) {
                version = p9_proto_2000L;
-               P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
+               p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
        } else
-               printk(KERN_INFO "9p: Unknown protocol version %s.\n", s);
+               pr_info("Unknown protocol version %s\n", s);
 
        return version;
 }
@@ -119,8 +121,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 
        tmp_options = kstrdup(opts, GFP_KERNEL);
        if (!tmp_options) {
-               P9_DPRINTK(P9_DEBUG_ERROR,
-                               "failed to allocate copy of option string\n");
+               p9_debug(P9_DEBUG_ERROR,
+                        "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;
@@ -134,8 +136,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                case Opt_msize:
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                          "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                ret = r;
                                continue;
                        }
@@ -145,15 +147,14 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                        s = match_strdup(&args[0]);
                        if (!s) {
                                ret = -ENOMEM;
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                       "problem allocating copy of trans arg\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "problem allocating copy of trans arg\n");
                                goto free_and_return;
                         }
                        clnt->trans_mod = v9fs_get_trans_by_name(s);
                        if (clnt->trans_mod == NULL) {
-                               printk(KERN_INFO
-                                       "9p: Could not find "
-                                       "request transport: %s\n", s);
+                               pr_info("Could not find request transport: %s\n",
+                                       s);
                                ret = -EINVAL;
                                kfree(s);
                                goto free_and_return;
@@ -167,8 +168,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
                        s = match_strdup(&args[0]);
                        if (!s) {
                                ret = -ENOMEM;
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                                       "problem allocating copy of version arg\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "problem allocating copy of version arg\n");
                                goto free_and_return;
                        }
                        ret = get_protocol_version(s);
@@ -225,7 +226,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
                                        sizeof(struct p9_req_t), GFP_ATOMIC);
 
                        if (!c->reqs[row]) {
-                               printk(KERN_ERR "Couldn't grow tag array\n");
+                               pr_err("Couldn't grow tag array\n");
                                spin_unlock_irqrestore(&c->lock, flags);
                                return ERR_PTR(-ENOMEM);
                        }
@@ -244,7 +245,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
        if (!req->tc) {
                req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS);
                if (!req->wq) {
-                       printk(KERN_ERR "Couldn't grow tag array\n");
+                       pr_err("Couldn't grow tag array\n");
                        return ERR_PTR(-ENOMEM);
                }
                init_waitqueue_head(req->wq);
@@ -253,7 +254,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
                req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
                                  GFP_NOFS);
                if ((!req->tc) || (!req->rc)) {
-                       printk(KERN_ERR "Couldn't grow tag array\n");
+                       pr_err("Couldn't grow tag array\n");
                        kfree(req->tc);
                        kfree(req->rc);
                        kfree(req->wq);
@@ -343,9 +344,9 @@ static void p9_tag_cleanup(struct p9_client *c)
        for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
                for (col = 0; col < P9_ROW_MAXTAG; col++) {
                        if (c->reqs[row][col].status != REQ_STATUS_IDLE) {
-                               P9_DPRINTK(P9_DEBUG_MUX,
-                                 "Attempting to cleanup non-free tag %d,%d\n",
-                                 row, col);
+                               p9_debug(P9_DEBUG_MUX,
+                                        "Attempting to cleanup non-free tag %d,%d\n",
+                                        row, col);
                                /* TODO: delay execution of cleanup */
                                return;
                        }
@@ -379,7 +380,7 @@ static void p9_tag_cleanup(struct p9_client *c)
 static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
 {
        int tag = r->tc->tag;
-       P9_DPRINTK(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
+       p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
 
        r->status = REQ_STATUS_IDLE;
        if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool))
@@ -394,9 +395,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  */
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
 {
-       P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
+       p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
        wake_up(req->wq);
-       P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
+       p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
 EXPORT_SYMBOL(p9_client_cb);
 
@@ -431,8 +432,8 @@ p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag,
        pdu->id = r_type;
        pdu->tag = r_tag;
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n", pdu->size,
-                                                       pdu->id, pdu->tag);
+       p9_debug(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n",
+                pdu->size, pdu->id, pdu->tag);
 
        if (type)
                *type = r_type;
@@ -473,7 +474,7 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
         */
        trace_9p_protocol_dump(c, req->rc);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+               p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
                return err;
        }
        if (type != P9_RERROR && type != P9_RLERROR)
@@ -492,21 +493,21 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
                if (!err || !IS_ERR_VALUE(err)) {
                        err = p9_errstr2errno(ename, strlen(ename));
 
-                       P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
-                                  -ecode, ename);
+                       p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+                                -ecode, ename);
                }
                kfree(ename);
        } else {
                err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
                err = -ecode;
 
-               P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+               p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
        }
 
        return err;
 
 out_err:
-       P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+       p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
 
        return err;
 }
@@ -538,7 +539,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
         */
        trace_9p_protocol_dump(c, req->rc);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+               p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
                return err;
        }
 
@@ -601,22 +602,22 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
                if (!err || !IS_ERR_VALUE(err)) {
                        err = p9_errstr2errno(ename, strlen(ename));
 
-                       P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
-                                  -ecode, ename);
+                       p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+                                -ecode, ename);
                }
                kfree(ename);
        } else {
                err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
                err = -ecode;
 
-               P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+               p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
        }
        return err;
 
 out_free:
        kfree(ename);
 out_err:
-       P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+       p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
        return err;
 }
 
@@ -645,7 +646,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
        if (err)
                return err;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
+       p9_debug(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
 
        req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag);
        if (IS_ERR(req))
@@ -670,7 +671,7 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
        int tag, err;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
+       p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
        /* we allow for any status other than disconnected */
        if (c->status == Disconnected)
@@ -744,11 +745,11 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
                                       req->status >= REQ_STATUS_RCVD);
 
        if (req->status == REQ_STATUS_ERROR) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+               p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
                err = req->t_err;
        }
        if ((err == -ERESTARTSYS) && (c->status == Connected)) {
-               P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+               p9_debug(P9_DEBUG_MUX, "flushing\n");
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
 
@@ -827,11 +828,11 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
                goto reterr;
        }
        if (req->status == REQ_STATUS_ERROR) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+               p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
                err = req->t_err;
        }
        if ((err == -ERESTARTSYS) && (c->status == Connected)) {
-               P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+               p9_debug(P9_DEBUG_MUX, "flushing\n");
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
 
@@ -865,7 +866,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
        struct p9_fid *fid;
        unsigned long flags;
 
-       P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt);
+       p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
        fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
        if (!fid)
                return ERR_PTR(-ENOMEM);
@@ -898,7 +899,7 @@ static void p9_fid_destroy(struct p9_fid *fid)
        struct p9_client *clnt;
        unsigned long flags;
 
-       P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
        clnt = fid->clnt;
        p9_idpool_put(fid->fid, clnt->fidpool);
        spin_lock_irqsave(&clnt->lock, flags);
@@ -915,8 +916,8 @@ static int p9_client_version(struct p9_client *c)
        char *version;
        int msize;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
-                                               c->msize, c->proto_version);
+       p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
+                c->msize, c->proto_version);
 
        switch (c->proto_version) {
        case p9_proto_2000L:
@@ -941,12 +942,12 @@ static int p9_client_version(struct p9_client *c)
 
        err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
        if (err) {
-               P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
+               p9_debug(P9_DEBUG_9P, "version error %d\n", err);
                trace_9p_protocol_dump(c, req->rc);
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
+       p9_debug(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
        if (!strncmp(version, "9P2000.L", 8))
                c->proto_version = p9_proto_2000L;
        else if (!strncmp(version, "9P2000.u", 8))
@@ -996,8 +997,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
        if (clnt->trans_mod == NULL) {
                err = -EPROTONOSUPPORT;
-               P9_DPRINTK(P9_DEBUG_ERROR,
-                               "No transport defined or default transport\n");
+               p9_debug(P9_DEBUG_ERROR,
+                        "No transport defined or default transport\n");
                goto destroy_tagpool;
        }
 
@@ -1007,8 +1008,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
                goto put_trans;
        }
 
-       P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
-               clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
+       p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
+                clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
 
        err = clnt->trans_mod->create(clnt, dev_name, options);
        if (err)
@@ -1041,7 +1042,7 @@ void p9_client_destroy(struct p9_client *clnt)
 {
        struct p9_fid *fid, *fidptr;
 
-       P9_DPRINTK(P9_DEBUG_MUX, "clnt %p\n", clnt);
+       p9_debug(P9_DEBUG_MUX, "clnt %p\n", clnt);
 
        if (clnt->trans_mod)
                clnt->trans_mod->close(clnt);
@@ -1049,7 +1050,7 @@ void p9_client_destroy(struct p9_client *clnt)
        v9fs_put_trans(clnt->trans_mod);
 
        list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) {
-               printk(KERN_INFO "Found fid %d not clunked\n", fid->fid);
+               pr_info("Found fid %d not clunked\n", fid->fid);
                p9_fid_destroy(fid);
        }
 
@@ -1064,14 +1065,14 @@ EXPORT_SYMBOL(p9_client_destroy);
 
 void p9_client_disconnect(struct p9_client *clnt)
 {
-       P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+       p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
        clnt->status = Disconnected;
 }
 EXPORT_SYMBOL(p9_client_disconnect);
 
 void p9_client_begin_disconnect(struct p9_client *clnt)
 {
-       P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+       p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
        clnt->status = BeginDisconnect;
 }
 EXPORT_SYMBOL(p9_client_begin_disconnect);
@@ -1085,8 +1086,8 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
        struct p9_qid qid;
 
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
-                  afid ? afid->fid : -1, uname, aname);
+       p9_debug(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+                afid ? afid->fid : -1, uname, aname);
        fid = p9_fid_create(clnt);
        if (IS_ERR(fid)) {
                err = PTR_ERR(fid);
@@ -1108,10 +1109,8 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
-                                       qid.type,
-                                       (unsigned long long)qid.path,
-                                       qid.version);
+       p9_debug(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
+                qid.type, (unsigned long long)qid.path, qid.version);
 
        memmove(&fid->qid, &qid, sizeof(struct p9_qid));
 
@@ -1151,8 +1150,8 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
                fid = oldfid;
 
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
-               oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
+       p9_debug(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
+                oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
 
        req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
                                                                nwname, wnames);
@@ -1169,7 +1168,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
        }
        p9_free_req(clnt, req);
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
+       p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
 
        if (nwqids != nwname) {
                err = -ENOENT;
@@ -1177,7 +1176,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
        }
 
        for (count = 0; count < nwqids; count++)
-               P9_DPRINTK(P9_DEBUG_9P, "<<<     [%d] %x.%llx.%x\n",
+               p9_debug(P9_DEBUG_9P, "<<<     [%d] %x.%llx.%x\n",
                        count, wqids[count].type,
                        (unsigned long long)wqids[count].path,
                        wqids[count].version);
@@ -1212,7 +1211,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
        int iounit;
 
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
                p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode);
        err = 0;
 
@@ -1234,7 +1233,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
+       p9_debug(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
                p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN",  qid.type,
                (unsigned long long)qid.path, qid.version, iounit);
 
@@ -1256,7 +1255,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
        struct p9_req_t *req;
        int iounit;
 
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P,
                        ">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
                        ofid->fid, name, flags, mode, gid);
        clnt = ofid->clnt;
@@ -1277,7 +1276,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
+       p9_debug(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
                        qid->type,
                        (unsigned long long)qid->path,
                        qid->version, iounit);
@@ -1301,7 +1300,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
        struct p9_qid qid;
        int iounit;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
                                                fid->fid, name, perm, mode);
        err = 0;
        clnt = fid->clnt;
@@ -1322,7 +1321,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
+       p9_debug(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
                                qid.type,
                                (unsigned long long)qid.path,
                                qid.version, iounit);
@@ -1344,7 +1343,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
        struct p9_client *clnt;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s  symtgt %s\n",
+       p9_debug(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s  symtgt %s\n",
                        dfid->fid, name, symtgt);
        clnt = dfid->clnt;
 
@@ -1361,7 +1360,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
+       p9_debug(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
                        qid->type, (unsigned long long)qid->path, qid->version);
 
 free_and_error:
@@ -1376,7 +1375,7 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
        struct p9_client *clnt;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
+       p9_debug(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
                        dfid->fid, oldfid->fid, newname);
        clnt = dfid->clnt;
        req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid,
@@ -1384,7 +1383,7 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RLINK\n");
+       p9_debug(P9_DEBUG_9P, "<<< RLINK\n");
        p9_free_req(clnt, req);
        return 0;
 }
@@ -1396,7 +1395,7 @@ int p9_client_fsync(struct p9_fid *fid, int datasync)
        struct p9_client *clnt;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
                        fid->fid, datasync);
        err = 0;
        clnt = fid->clnt;
@@ -1407,7 +1406,7 @@ int p9_client_fsync(struct p9_fid *fid, int datasync)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
 
        p9_free_req(clnt, req);
 
@@ -1423,12 +1422,13 @@ int p9_client_clunk(struct p9_fid *fid)
        struct p9_req_t *req;
 
        if (!fid) {
-               P9_EPRINTK(KERN_WARNING, "Trying to clunk with NULL fid\n");
+               pr_warn("%s (%d): Trying to clunk with NULL fid\n",
+                       __func__, task_pid_nr(current));
                dump_stack();
                return 0;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
        err = 0;
        clnt = fid->clnt;
 
@@ -1438,7 +1438,7 @@ int p9_client_clunk(struct p9_fid *fid)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
 
        p9_free_req(clnt, req);
 error:
@@ -1456,7 +1456,7 @@ int p9_client_remove(struct p9_fid *fid)
        struct p9_client *clnt;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
        err = 0;
        clnt = fid->clnt;
 
@@ -1466,7 +1466,7 @@ int p9_client_remove(struct p9_fid *fid)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
 
        p9_free_req(clnt, req);
 error:
@@ -1481,7 +1481,7 @@ int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
        struct p9_req_t *req;
        struct p9_client *clnt;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
                   dfid->fid, name, flags);
 
        clnt = dfid->clnt;
@@ -1490,7 +1490,7 @@ int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
                err = PTR_ERR(req);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
+       p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
 
        p9_free_req(clnt, req);
 error:
@@ -1509,7 +1509,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
        int err, rsize, non_zc = 0;
 
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
                   fid->fid, (long long unsigned) offset, count);
        err = 0;
        clnt = fid->clnt;
@@ -1552,7 +1552,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+       p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
 
        if (non_zc) {
                if (data) {
@@ -1584,7 +1584,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
        struct p9_client *clnt;
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
                                fid->fid, (long long unsigned) offset, count);
        err = 0;
        clnt = fid->clnt;
@@ -1626,7 +1626,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+       p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
 
        p9_free_req(clnt, req);
        return count;
@@ -1646,7 +1646,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
        struct p9_req_t *req;
        u16 ignored;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
 
        if (!ret)
                return ERR_PTR(-ENOMEM);
@@ -1667,7 +1667,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P,
                "<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
                "<<<    mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
                "<<<    name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
@@ -1696,7 +1696,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
                                                                GFP_KERNEL);
        struct p9_req_t *req;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
+       p9_debug(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
                                                        fid->fid, request_mask);
 
        if (!ret)
@@ -1718,7 +1718,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P,
                "<<< RGETATTR st_result_mask=%lld\n"
                "<<< qid=%x.%llx.%x\n"
                "<<< st_mode=%8.8x st_nlink=%llu\n"
@@ -1784,8 +1784,8 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
        err = 0;
        clnt = fid->clnt;
        wst->size = p9_client_statsize(wst, clnt->proto_version);
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P,
                "     sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
                "     mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
                "     name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
@@ -1802,7 +1802,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
 
        p9_free_req(clnt, req);
 error:
@@ -1818,8 +1818,8 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P,
                "    valid=%x mode=%x uid=%d gid=%d size=%lld\n"
                "    atime_sec=%lld atime_nsec=%lld\n"
                "    mtime_sec=%lld mtime_nsec=%lld\n",
@@ -1833,7 +1833,7 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
                err = PTR_ERR(req);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
        p9_free_req(clnt, req);
 error:
        return err;
@@ -1849,7 +1849,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
        err = 0;
        clnt = fid->clnt;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
 
        req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
        if (IS_ERR(req)) {
@@ -1866,7 +1866,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+       p9_debug(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
                "blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
                "fsid %llu namelen %ld\n",
                fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
@@ -1889,7 +1889,7 @@ int p9_client_rename(struct p9_fid *fid,
        err = 0;
        clnt = fid->clnt;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+       p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
                        fid->fid, newdirfid->fid, name);
 
        req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
@@ -1899,7 +1899,7 @@ int p9_client_rename(struct p9_fid *fid,
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
 
        p9_free_req(clnt, req);
 error:
@@ -1917,7 +1917,7 @@ int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
        err = 0;
        clnt = olddirfid->clnt;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
+       p9_debug(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
                   " newdirfid %d new name %s\n", olddirfid->fid, old_name,
                   newdirfid->fid, new_name);
 
@@ -1928,7 +1928,7 @@ int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
                goto error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
+       p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
                   newdirfid->fid, new_name);
 
        p9_free_req(clnt, req);
@@ -1956,7 +1956,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
                attr_fid = NULL;
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P,
                ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
                file_fid->fid, attr_fid->fid, attr_name);
 
@@ -1973,7 +1973,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
                goto clunk_fid;
        }
        p9_free_req(clnt, req);
-       P9_DPRINTK(P9_DEBUG_9P, "<<<  RXATTRWALK fid %d size %llu\n",
+       p9_debug(P9_DEBUG_9P, "<<<  RXATTRWALK fid %d size %llu\n",
                attr_fid->fid, *attr_size);
        return attr_fid;
 clunk_fid:
@@ -1994,7 +1994,7 @@ int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
        struct p9_req_t *req;
        struct p9_client *clnt;
 
-       P9_DPRINTK(P9_DEBUG_9P,
+       p9_debug(P9_DEBUG_9P,
                ">>> TXATTRCREATE fid %d name  %s size %lld flag %d\n",
                fid->fid, name, (long long)attr_size, flags);
        err = 0;
@@ -2005,7 +2005,7 @@ int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
                err = PTR_ERR(req);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
        p9_free_req(clnt, req);
 error:
        return err;
@@ -2019,7 +2019,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
        struct p9_req_t *req;
        char *dataptr;
 
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
                                fid->fid, (long long unsigned) offset, count);
 
        err = 0;
@@ -2056,7 +2056,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
                goto free_and_error;
        }
 
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+       p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
        if (non_zc)
                memmove(data, dataptr, count);
@@ -2080,7 +2080,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
+       p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
                "minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
        req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode,
                MAJOR(rdev), MINOR(rdev), gid);
@@ -2092,7 +2092,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
                trace_9p_protocol_dump(clnt, req->rc);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
+       p9_debug(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
                                (unsigned long long)qid->path, qid->version);
 
 error:
@@ -2111,7 +2111,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
+       p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
                 fid->fid, name, mode, gid);
        req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode,
                gid);
@@ -2123,7 +2123,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
                trace_9p_protocol_dump(clnt, req->rc);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
+       p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
                                (unsigned long long)qid->path, qid->version);
 
 error:
@@ -2141,7 +2141,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
+       p9_debug(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
                        "start %lld length %lld proc_id %d client_id %s\n",
                        fid->fid, flock->type, flock->flags, flock->start,
                        flock->length, flock->proc_id, flock->client_id);
@@ -2158,7 +2158,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
                trace_9p_protocol_dump(clnt, req->rc);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
+       p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
 error:
        p9_free_req(clnt, req);
        return err;
@@ -2174,7 +2174,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
+       p9_debug(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
                "length %lld proc_id %d client_id %s\n", fid->fid, glock->type,
                glock->start, glock->length, glock->proc_id, glock->client_id);
 
@@ -2191,7 +2191,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
                trace_9p_protocol_dump(clnt, req->rc);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
+       p9_debug(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
                "proc_id %d client_id %s\n", glock->type, glock->start,
                glock->length, glock->proc_id, glock->client_id);
 error:
@@ -2208,7 +2208,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
 
        err = 0;
        clnt = fid->clnt;
-       P9_DPRINTK(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
+       p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
 
        req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
        if (IS_ERR(req))
@@ -2219,7 +2219,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
                trace_9p_protocol_dump(clnt, req->rc);
                goto error;
        }
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
+       p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
 error:
        p9_free_req(clnt, req);
        return err;
index 52518512a93e86eb261b34af3dddf6778a9ea50f..2ab2de76010f0d47f67ee4a79aaf567f97026ed9 100644 (file)
@@ -27,6 +27,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/jhash.h>
@@ -237,8 +239,8 @@ int p9_errstr2errno(char *errstr, int len)
        if (errno == 0) {
                /* TODO: if error isn't found, add it dynamically */
                errstr[len] = 0;
-               printk(KERN_ERR "%s: server reported unknown error %s\n",
-                       __func__, errstr);
+               pr_err("%s: server reported unknown error %s\n",
+                      __func__, errstr);
                errno = ESERVERFAULT;
        }
 
index 2664d1292291344e6a43964334df681d4e49e372..6ab36aea77275ef342b88f8e66e0e5c294086fd0 100644 (file)
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <net/9p/9p.h>
 #include <linux/fs.h>
@@ -39,6 +43,29 @@ unsigned int p9_debug_level = 0;     /* feature-rific global debug level  */
 EXPORT_SYMBOL(p9_debug_level);
 module_param_named(debug, p9_debug_level, uint, 0);
 MODULE_PARM_DESC(debug, "9P debugging level");
+
+void _p9_debug(enum p9_debug_flags level, const char *func,
+               const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       if ((p9_debug_level & level) != level)
+               return;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       if (level == P9_DEBUG_9P)
+               pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf);
+       else
+               pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf);
+
+       va_end(args);
+}
+EXPORT_SYMBOL(_p9_debug);
 #endif
 
 /*
@@ -147,7 +174,7 @@ static int __init init_p9(void)
        int ret = 0;
 
        p9_error_init();
-       printk(KERN_INFO "Installing 9P2000 support\n");
+       pr_info("Installing 9P2000 support\n");
        p9_trans_fd_init();
 
        return ret;
@@ -160,7 +187,7 @@ static int __init init_p9(void)
 
 static void __exit exit_p9(void)
 {
-       printk(KERN_INFO "Unloading 9P2000 support\n");
+       pr_info("Unloading 9P2000 support\n");
 
        p9_trans_fd_exit();
 }
index 55e10a96c9025e135c869fafbc328bfc6123c521..9ee48cb3017998f47928a3d8413c5df44ec4ac73 100644 (file)
@@ -534,7 +534,7 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
 
        ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
        if (ret) {
-               P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
+               p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
                trace_9p_protocol_dump(clnt, &fake_pdu);
        }
 
@@ -558,8 +558,8 @@ int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
        pdu->size = size;
 
        trace_9p_protocol_dump(clnt, pdu);
-       P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
-                                                       pdu->id, pdu->tag);
+       p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n",
+                pdu->size, pdu->id, pdu->tag);
 
        return err;
 }
@@ -585,7 +585,7 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
        ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
                          &dirent->d_off, &dirent->d_type, &nameptr);
        if (ret) {
-               P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
+               p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
                trace_9p_protocol_dump(clnt, &fake_pdu);
                goto out;
        }
index fdfdb5747f63b6492fc23eafa000dcd7d93c9c5b..fccae26fa67441bbdb5b239eda94fb09417d87a7 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/in.h>
 #include <linux/module.h>
 #include <linux/net.h>
@@ -191,7 +193,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
        unsigned long flags;
        LIST_HEAD(cancel_list);
 
-       P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
+       p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 
        spin_lock_irqsave(&m->client->lock, flags);
 
@@ -217,7 +219,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
        spin_unlock_irqrestore(&m->client->lock, flags);
 
        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+               p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
                list_del(&req->req_list);
                p9_client_cb(m->client, req);
        }
@@ -275,7 +277,7 @@ static int p9_fd_read(struct p9_client *client, void *v, int len)
                return -EREMOTEIO;
 
        if (!(ts->rd->f_flags & O_NONBLOCK))
-               P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
+               p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
 
        ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
@@ -299,7 +301,7 @@ static void p9_read_work(struct work_struct *work)
        if (m->err < 0)
                return;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
+       p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
 
        if (!m->rbuf) {
                m->rbuf = m->tmp_buf;
@@ -308,11 +310,11 @@ static void p9_read_work(struct work_struct *work)
        }
 
        clear_bit(Rpending, &m->wsched);
-       P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
-                                       m->rpos, m->rsize, m->rsize-m->rpos);
+       p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
+                m, m->rpos, m->rsize, m->rsize-m->rpos);
        err = p9_fd_read(m->client, m->rbuf + m->rpos,
                                                m->rsize - m->rpos);
-       P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
+       p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
@@ -325,25 +327,25 @@ static void p9_read_work(struct work_struct *work)
 
        if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
                u16 tag;
-               P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
+               p9_debug(P9_DEBUG_TRANS, "got new header\n");
 
                n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
                if (n >= m->client->msize) {
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "requested packet size too big: %d\n", n);
+                       p9_debug(P9_DEBUG_ERROR,
+                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }
 
                tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
-               P9_DPRINTK(P9_DEBUG_TRANS,
-                       "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
+               p9_debug(P9_DEBUG_TRANS,
+                        "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
 
                m->req = p9_tag_lookup(m->client, tag);
                if (!m->req || (m->req->status != REQ_STATUS_SENT &&
                                        m->req->status != REQ_STATUS_FLSH)) {
-                       P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
-                                                                tag);
+                       p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
+                                tag);
                        err = -EIO;
                        goto error;
                }
@@ -364,7 +366,7 @@ static void p9_read_work(struct work_struct *work)
 
        /* not an else because some packets (like clunk) have no payload */
        if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
-               P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
+               p9_debug(P9_DEBUG_TRANS, "got new packet\n");
                spin_lock(&m->client->lock);
                if (m->req->status != REQ_STATUS_ERROR)
                        m->req->status = REQ_STATUS_RCVD;
@@ -384,7 +386,7 @@ static void p9_read_work(struct work_struct *work)
                        n = p9_fd_poll(m->client, NULL);
 
                if (n & POLLIN) {
-                       P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
+                       p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
@@ -418,7 +420,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
                return -EREMOTEIO;
 
        if (!(ts->wr->f_flags & O_NONBLOCK))
-               P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
+               p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
 
        oldfs = get_fs();
        set_fs(get_ds());
@@ -460,7 +462,7 @@ static void p9_write_work(struct work_struct *work)
                req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                               req_list);
                req->status = REQ_STATUS_SENT;
-               P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
+               p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
                list_move_tail(&req->req_list, &m->req_list);
 
                m->wbuf = req->tc->sdata;
@@ -469,11 +471,11 @@ static void p9_write_work(struct work_struct *work)
                spin_unlock(&m->client->lock);
        }
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
-                                                               m->wsize);
+       p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
+                m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
-       P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
+       p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
@@ -497,7 +499,7 @@ static void p9_write_work(struct work_struct *work)
                        n = p9_fd_poll(m->client, NULL);
 
                if (n & POLLOUT) {
-                       P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
+                       p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
@@ -551,7 +553,7 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
        }
 
        if (!pwait) {
-               P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
+               p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }
 
@@ -573,8 +575,7 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
        int n;
        struct p9_conn *m;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
-                                                               client->msize);
+       p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
        m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);
@@ -591,12 +592,12 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
 
        n = p9_fd_poll(client, &m->pt);
        if (n & POLLIN) {
-               P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+               p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }
 
        if (n & POLLOUT) {
-               P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+               p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }
 
@@ -618,7 +619,7 @@ static void p9_poll_mux(struct p9_conn *m)
 
        n = p9_fd_poll(m->client, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
-               P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
+               p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                p9_conn_cancel(m, n);
@@ -626,19 +627,19 @@ static void p9_poll_mux(struct p9_conn *m)
 
        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
-               P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+               p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
-                       P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
+                       p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                }
        }
 
        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
-               P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+               p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
                    !test_and_set_bit(Wworksched, &m->wsched)) {
-                       P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
+                       p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                }
        }
@@ -661,8 +662,8 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
        struct p9_trans_fd *ts = client->trans;
        struct p9_conn *m = ts->conn;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
-                                               current, req->tc, req->tc->id);
+       p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
+                m, current, req->tc, req->tc->id);
        if (m->err < 0)
                return m->err;
 
@@ -686,7 +687,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 {
        int ret = 1;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+       p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 
        spin_lock(&client->lock);
 
@@ -726,8 +727,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 
        tmp_options = kstrdup(params, GFP_KERNEL);
        if (!tmp_options) {
-               P9_DPRINTK(P9_DEBUG_ERROR,
-                               "failed to allocate copy of option string\n");
+               p9_debug(P9_DEBUG_ERROR,
+                        "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;
@@ -741,8 +742,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
                if (token != Opt_err) {
                        r = match_int(&args[0], &option);
                        if (r < 0) {
-                               P9_DPRINTK(P9_DEBUG_ERROR,
-                               "integer field, but no integer?\n");
+                               p9_debug(P9_DEBUG_ERROR,
+                                        "integer field, but no integer?\n");
                                continue;
                        }
                }
@@ -801,7 +802,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
        csocket->sk->sk_allocation = GFP_NOIO;
        fd = sock_map_fd(csocket, 0);
        if (fd < 0) {
-               P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+               pr_err("%s (%d): failed to map fd\n",
+                      __func__, task_pid_nr(current));
                sock_release(csocket);
                kfree(p);
                return fd;
@@ -837,8 +839,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 
 static void p9_conn_destroy(struct p9_conn *m)
 {
-       P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
-               m->mux_list.prev, m->mux_list.next);
+       p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
+                m, m->mux_list.prev, m->mux_list.next);
 
        p9_mux_poll_stop(m);
        cancel_work_sync(&m->rq);
@@ -919,7 +921,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
                            SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
        if (err) {
-               P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
+               pr_err("%s (%d): problem creating socket\n",
+                      __func__, task_pid_nr(current));
                return err;
        }
 
@@ -927,9 +930,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
                                    (struct sockaddr *)&sin_server,
                                    sizeof(struct sockaddr_in), 0);
        if (err < 0) {
-               P9_EPRINTK(KERN_ERR,
-                       "p9_trans_tcp: problem connecting socket to %s\n",
-                       addr);
+               pr_err("%s (%d): problem connecting socket to %s\n",
+                      __func__, task_pid_nr(current), addr);
                sock_release(csocket);
                return err;
        }
@@ -947,8 +949,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
        csocket = NULL;
 
        if (strlen(addr) >= UNIX_PATH_MAX) {
-               P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
-                       addr);
+               pr_err("%s (%d): address too long: %s\n",
+                      __func__, task_pid_nr(current), addr);
                return -ENAMETOOLONG;
        }
 
@@ -957,15 +959,16 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
                            SOCK_STREAM, 0, &csocket, 1);
        if (err < 0) {
-               P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+               pr_err("%s (%d): problem creating socket\n",
+                      __func__, task_pid_nr(current));
+
                return err;
        }
        err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
                        sizeof(struct sockaddr_un) - 1, 0);
        if (err < 0) {
-               P9_EPRINTK(KERN_ERR,
-                       "p9_trans_unix: problem connecting socket: %s: %d\n",
-                       addr, err);
+               pr_err("%s (%d): problem connecting socket: %s: %d\n",
+                      __func__, task_pid_nr(current), addr, err);
                sock_release(csocket);
                return err;
        }
@@ -983,7 +986,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
        parse_opts(args, &opts);
 
        if (opts.rfd == ~0 || opts.wfd == ~0) {
-               printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
+               pr_err("Insufficient options for proto=fd\n");
                return -ENOPROTOOPT;
        }
 
@@ -1050,7 +1053,7 @@ static void p9_poll_workfn(struct work_struct *work)
 {
        unsigned long flags;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
+       p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
 
        spin_lock_irqsave(&p9_poll_lock, flags);
        while (!list_empty(&p9_poll_pending_list)) {
@@ -1066,7 +1069,7 @@ static void p9_poll_workfn(struct work_struct *work)
        }
        spin_unlock_irqrestore(&p9_poll_lock, flags);
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
+       p9_debug(P9_DEBUG_TRANS, "finish\n");
 }
 
 int p9_trans_fd_init(void)
index 159c50f1c6bf672c24b0d13f8e4f52dd40a18589..2c69ddd691a16a0056ed071695fb3ebc6293a917 100644 (file)
@@ -26,6 +26,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/in.h>
 #include <linux/module.h>
 #include <linux/net.h>
@@ -178,8 +180,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 
        tmp_options = kstrdup(params, GFP_KERNEL);
        if (!tmp_options) {
-               P9_DPRINTK(P9_DEBUG_ERROR,
-                          "failed to allocate copy of option string\n");
+               p9_debug(P9_DEBUG_ERROR,
+                        "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;
@@ -192,8 +194,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
                token = match_token(p, tokens, args);
                r = match_int(&args[0], &option);
                if (r < 0) {
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                                  "integer field, but no integer?\n");
+                       p9_debug(P9_DEBUG_ERROR,
+                                "integer field, but no integer?\n");
                        continue;
                }
                switch (token) {
@@ -301,8 +303,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
        return;
 
  err_out:
-       P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
-                  req, err, status);
+       p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
        rdma->state = P9_RDMA_FLUSHING;
        client->status = Disconnected;
 }
@@ -318,8 +319,8 @@ handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
 
 static void qp_event_handler(struct ib_event *event, void *context)
 {
-       P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
-                                                               context);
+       p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
+                event->event, context);
 }
 
 static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -345,8 +346,7 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
                        break;
 
                default:
-                       printk(KERN_ERR "9prdma: unexpected completion type, "
-                              "c->wc_op=%d, wc.opcode=%d, status=%d\n",
+                       pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
                               c->wc_op, wc.opcode, wc.status);
                        break;
                }
@@ -356,7 +356,7 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
 
 static void cq_event_handler(struct ib_event *e, void *v)
 {
-       P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
+       p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
 }
 
 static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
@@ -407,7 +407,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
        return ib_post_recv(rdma->qp, &wr, &bad_wr);
 
  error:
-       P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+       p9_debug(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
 }
 
@@ -500,7 +500,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        kfree(c);
        kfree(rpl_context->rc);
        kfree(rpl_context);
-       P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+       p9_debug(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
  err_free1:
        kfree(rpl_context->rc);
index 32aa9834229cf6a9748fba543c47f5ebb81f5d31..330421e547139db18eef027495cfe2cee9538bc7 100644 (file)
@@ -26,6 +26,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/in.h>
 #include <linux/module.h>
 #include <linux/net.h>
@@ -145,7 +147,7 @@ static void req_done(struct virtqueue *vq)
        struct p9_req_t *req;
        unsigned long flags;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
+       p9_debug(P9_DEBUG_TRANS, ": request done\n");
 
        while (1) {
                spin_lock_irqsave(&chan->lock, flags);
@@ -158,8 +160,8 @@ static void req_done(struct virtqueue *vq)
                spin_unlock_irqrestore(&chan->lock, flags);
                /* Wakeup if anyone waiting for VirtIO ring space. */
                wake_up(chan->vc_wq);
-               P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
-               P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
+               p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
+               p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
                req = p9_tag_lookup(chan->client, rc->tag);
                req->status = REQ_STATUS_RCVD;
                p9_client_cb(chan->client, req);
@@ -257,7 +259,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
        unsigned long flags;
        struct virtio_chan *chan = client->trans;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+       p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
        req->status = REQ_STATUS_SENT;
 req_retry:
@@ -280,20 +282,19 @@ req_retry:
                        if (err  == -ERESTARTSYS)
                                return err;
 
-                       P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+                       p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
                        goto req_retry;
                } else {
                        spin_unlock_irqrestore(&chan->lock, flags);
-                       P9_DPRINTK(P9_DEBUG_TRANS,
-                                       "9p debug: "
-                                       "virtio rpc add_buf returned failure");
+                       p9_debug(P9_DEBUG_TRANS,
+                                "virtio rpc add_buf returned failure\n");
                        return -EIO;
                }
        }
        virtqueue_kick(chan->vq);
        spin_unlock_irqrestore(&chan->lock, flags);
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+       p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
        return 0;
 }
 
@@ -354,7 +355,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
        struct page **in_pages = NULL, **out_pages = NULL;
        struct virtio_chan *chan = client->trans;
 
-       P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+       p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
        if (uodata) {
                out_nr_pages = p9_nr_pages(uodata, outlen);
@@ -423,20 +424,19 @@ req_retry_pinned:
                        if (err  == -ERESTARTSYS)
                                goto err_out;
 
-                       P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+                       p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
                        goto req_retry_pinned;
                } else {
                        spin_unlock_irqrestore(&chan->lock, flags);
-                       P9_DPRINTK(P9_DEBUG_TRANS,
-                                  "9p debug: "
-                                  "virtio rpc add_buf returned failure");
+                       p9_debug(P9_DEBUG_TRANS,
+                                "virtio rpc add_buf returned failure\n");
                        err = -EIO;
                        goto err_out;
                }
        }
        virtqueue_kick(chan->vq);
        spin_unlock_irqrestore(&chan->lock, flags);
-       P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+       p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
        err = wait_event_interruptible(*req->wq,
                                       req->status >= REQ_STATUS_RCVD);
        /*
@@ -491,7 +491,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 
        chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
        if (!chan) {
-               printk(KERN_ERR "9p: Failed to allocate virtio 9P channel\n");
+               pr_err("Failed to allocate virtio 9P channel\n");
                err = -ENOMEM;
                goto fail;
        }
@@ -592,7 +592,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
        mutex_unlock(&virtio_9p_lock);
 
        if (!found) {
-               printk(KERN_ERR "9p: no channels available\n");
+               pr_err("no channels available\n");
                return ret;
        }
 
index 9c1c9348ac35d3410ccb255266f9367bbf5d2549..6ceeeb384de73cbd4583b9b22a41c27b8b0885bb 100644 (file)
@@ -106,7 +106,7 @@ retry:
        else if (error)
                return -1;
 
-       P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
+       p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
        return i;
 }
 EXPORT_SYMBOL(p9_idpool_get);
@@ -124,7 +124,7 @@ void p9_idpool_put(int id, struct p9_idpool *p)
 {
        unsigned long flags;
 
-       P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
+       p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
 
        spin_lock_irqsave(&p->lock, flags);
        idr_remove(&p->pool, id);
index e010a015d99671f27a22573a31eae75ae6cda955..1426ec3d0a531ecd4ec0b227c5f7aa22843a1750 100644 (file)
@@ -41,15 +41,17 @@ EXPORT_SYMBOL_GPL(rpc_lookup_cred);
 /*
  * Public call interface for looking up machine creds.
  */
-struct rpc_cred *rpc_lookup_machine_cred(void)
+struct rpc_cred *rpc_lookup_machine_cred(const char *service_name)
 {
        struct auth_cred acred = {
                .uid = RPC_MACHINE_CRED_USERID,
                .gid = RPC_MACHINE_CRED_GROUPID,
+               .principal = service_name,
                .machine_cred = 1,
        };
 
-       dprintk("RPC:       looking up machine cred\n");
+       dprintk("RPC:       looking up machine cred for service %s\n",
+                       service_name);
        return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
 }
 EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
index afb56553dfe72d630aea0737020ff973a815f34f..28d72d298735e0f3d72d161220a1241c241fb98d 100644 (file)
@@ -392,7 +392,8 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
 }
 
 static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
-                               struct rpc_clnt *clnt, int machine_cred)
+                               struct rpc_clnt *clnt,
+                               const char *service_name)
 {
        struct gss_api_mech *mech = gss_msg->auth->mech;
        char *p = gss_msg->databuf;
@@ -407,12 +408,8 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
                p += len;
                gss_msg->msg.len += len;
        }
-       if (machine_cred) {
-               len = sprintf(p, "service=* ");
-               p += len;
-               gss_msg->msg.len += len;
-       } else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
-               len = sprintf(p, "service=nfs ");
+       if (service_name != NULL) {
+               len = sprintf(p, "service=%s ", service_name);
                p += len;
                gss_msg->msg.len += len;
        }
@@ -429,17 +426,18 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
 }
 
 static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
-                               struct rpc_clnt *clnt, int machine_cred)
+                               struct rpc_clnt *clnt,
+                               const char *service_name)
 {
        if (pipe_version == 0)
                gss_encode_v0_msg(gss_msg);
        else /* pipe_version == 1 */
-               gss_encode_v1_msg(gss_msg, clnt, machine_cred);
+               gss_encode_v1_msg(gss_msg, clnt, service_name);
 }
 
-static inline struct gss_upcall_msg *
-gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
-               int machine_cred)
+static struct gss_upcall_msg *
+gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
+               uid_t uid, const char *service_name)
 {
        struct gss_upcall_msg *gss_msg;
        int vers;
@@ -459,7 +457,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
        atomic_set(&gss_msg->count, 1);
        gss_msg->uid = uid;
        gss_msg->auth = gss_auth;
-       gss_encode_msg(gss_msg, clnt, machine_cred);
+       gss_encode_msg(gss_msg, clnt, service_name);
        return gss_msg;
 }
 
@@ -471,7 +469,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
        struct gss_upcall_msg *gss_new, *gss_msg;
        uid_t uid = cred->cr_uid;
 
-       gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
+       gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
        if (IS_ERR(gss_new))
                return gss_new;
        gss_msg = gss_add_msg(gss_new);
@@ -995,7 +993,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
         */
        cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
        cred->gc_service = gss_auth->service;
-       cred->gc_machine_cred = acred->machine_cred;
+       cred->gc_principal = NULL;
+       if (acred->machine_cred)
+               cred->gc_principal = acred->principal;
        kref_get(&gss_auth->kref);
        return &cred->gc_base;
 
@@ -1030,7 +1030,12 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
        if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
                return 0;
 out:
-       if (acred->machine_cred != gss_cred->gc_machine_cred)
+       if (acred->principal != NULL) {
+               if (gss_cred->gc_principal == NULL)
+                       return 0;
+               return strcmp(acred->principal, gss_cred->gc_principal) == 0;
+       }
+       if (gss_cred->gc_principal != NULL)
                return 0;
        return rc->cr_uid == acred->uid;
 }
@@ -1104,7 +1109,8 @@ static int gss_renew_cred(struct rpc_task *task)
        struct rpc_auth *auth = oldcred->cr_auth;
        struct auth_cred acred = {
                .uid = oldcred->cr_uid,
-               .machine_cred = gss_cred->gc_machine_cred,
+               .principal = gss_cred->gc_principal,
+               .machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
        };
        struct rpc_cred *new;
 
index 277ebd4bf095fd973af455363a2bf0fc476511bd..593f4c605305d7b0a51465ae42b09a7eaa6fcadd 100644 (file)
@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
  * Copies data into an arbitrary memory location from an array of pages
  * The copy is assumed to be non-overlapping.
  */
-static void
+void
 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 {
        struct page **pgfrom;
@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 
        } while ((len -= copy) != 0);
 }
+EXPORT_SYMBOL_GPL(_copy_from_pages);
 
 /*
  * xdr_shrink_bufhead
index 8fda3b3f7be87c01958c80823aea23cc22c790bf..e3bfcbe8a520b63639b04115bf148fb4ddd144eb 100755 (executable)
@@ -227,7 +227,7 @@ our $Inline = qr{inline|__always_inline|noinline};
 our $Member    = qr{->$Ident|\.$Ident|\[[^]]*\]};
 our $Lval      = qr{$Ident(?:$Member)*};
 
-our $Constant  = qr{(?:[0-9]+|0x[0-9a-fA-F]+)[UL]*};
+our $Constant  = qr{(?i:(?:[0-9]+|0x[0-9a-f]+)[ul]*)};
 our $Assignment        = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
 our $Compare    = qr{<=|>=|==|!=|<|>};
 our $Operators = qr{
@@ -315,7 +315,7 @@ sub build_types {
        $NonptrType     = qr{
                        (?:$Modifier\s+|const\s+)*
                        (?:
-                               (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)|
+                               (?:typeof|__typeof__)\s*\([^\)]*\)|
                                (?:$typeTypedefs\b)|
                                (?:${all}\b)
                        )
@@ -334,6 +334,7 @@ our $match_balanced_parentheses = qr/(\((?:[^\(\)]+|(-1))*\))/;
 
 our $Typecast  = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
 our $LvalOrFunc        = qr{($Lval)\s*($match_balanced_parentheses{0,1})\s*};
+our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)};
 
 sub deparenthesize {
        my ($string) = @_;
@@ -676,6 +677,10 @@ sub ctx_statement_block {
                        if ($off >= $len) {
                                last;
                        }
+                       if ($level == 0 && substr($blk, $off) =~ /^.\s*#\s*define/) {
+                               $level++;
+                               $type = '#';
+                       }
                }
                $p = $c;
                $c = substr($blk, $off, 1);
@@ -738,6 +743,13 @@ sub ctx_statement_block {
                                last;
                        }
                }
+               # Preprocessor commands end at the newline unless escaped.
+               if ($type eq '#' && $c eq "\n" && $p ne "\\") {
+                       $level--;
+                       $type = '';
+                       $off++;
+                       last;
+               }
                $off++;
        }
        # We are truly at the end, so shuffle to the next line.
@@ -1020,7 +1032,7 @@ sub annotate_values {
                } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
                        print "CAST($1)\n" if ($dbg_values > 1);
                        push(@av_paren_type, $type);
-                       $type = 'C';
+                       $type = 'c';
 
                } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
                        print "DECLARE($1)\n" if ($dbg_values > 1);
@@ -1212,7 +1224,9 @@ sub possible {
                        case|
                        else|
                        asm|__asm__|
-                       do
+                       do|
+                       \#|
+                       \#\#|
                )(?:\s|$)|
                ^(?:typedef|struct|enum)\b
            )}x;
@@ -1359,6 +1373,7 @@ sub process {
        my %suppress_ifbraces;
        my %suppress_whiletrailers;
        my %suppress_export;
+       my $suppress_statement = 0;
 
        # Pre-scan the patch sanitizing the lines.
        # Pre-scan the patch looking for any __setup documentation.
@@ -1468,6 +1483,7 @@ sub process {
                        %suppress_ifbraces = ();
                        %suppress_whiletrailers = ();
                        %suppress_export = ();
+                       $suppress_statement = 0;
                        next;
 
 # track the line number as we move through the hunk, note that
@@ -1504,9 +1520,11 @@ sub process {
                if ($line =~ /^diff --git.*?(\S+)$/) {
                        $realfile = $1;
                        $realfile =~ s@^([^/]*)/@@;
+                       $in_commit_log = 0;
                } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
                        $realfile = $1;
                        $realfile =~ s@^([^/]*)/@@;
+                       $in_commit_log = 0;
 
                        $p1_prefix = $1;
                        if (!$file && $tree && $p1_prefix ne '' &&
@@ -1546,7 +1564,8 @@ sub process {
                }
 
 # Check signature styles
-               if ($line =~ /^(\s*)($signature_tags)(\s*)(.*)/) {
+               if (!$in_header_lines &&
+                   $line =~ /^(\s*)($signature_tags)(\s*)(.*)/) {
                        my $space_before = $1;
                        my $sign_off = $2;
                        my $space_after = $3;
@@ -1623,7 +1642,7 @@ sub process {
 # Check if it's the start of a commit log
 # (not a header line and we haven't seen the patch filename)
                if ($in_header_lines && $realfile =~ /^$/ &&
-                   $rawline !~ /^(commit\b|from\b|\w+:).+$/i) {
+                   $rawline !~ /^(commit\b|from\b|[\w-]+:).+$/i) {
                        $in_header_lines = 0;
                        $in_commit_log = 1;
                }
@@ -1655,19 +1674,26 @@ sub process {
 # Only applies when adding the entry originally, after that we do not have
 # sufficient context to determine whether it is indeed long enough.
                if ($realfile =~ /Kconfig/ &&
-                   $line =~ /\+\s*(?:---)?help(?:---)?$/) {
+                   $line =~ /.\s*config\s+/) {
                        my $length = 0;
                        my $cnt = $realcnt;
                        my $ln = $linenr + 1;
                        my $f;
+                       my $is_start = 0;
                        my $is_end = 0;
-                       while ($cnt > 0 && defined $lines[$ln - 1]) {
+                       for (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {
                                $f = $lines[$ln - 1];
                                $cnt-- if ($lines[$ln - 1] !~ /^-/);
                                $is_end = $lines[$ln - 1] =~ /^\+/;
-                               $ln++;
 
                                next if ($f =~ /^-/);
+
+                               if ($lines[$ln - 1] =~ /.\s*(?:bool|tristate)\s*\"/) {
+                                       $is_start = 1;
+                               } elsif ($lines[$ln - 1] =~ /.\s*(?:---)?help(?:---)?$/) {
+                                       $length = -1;
+                               }
+
                                $f =~ s/^.//;
                                $f =~ s/#.*//;
                                $f =~ s/^\s+//;
@@ -1679,8 +1705,8 @@ sub process {
                                $length++;
                        }
                        WARN("CONFIG_DESCRIPTION",
-                            "please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
-                       #print "is_end<$is_end> length<$length>\n";
+                            "please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_start && $is_end && $length < 4);
+                       #print "is_start<$is_start> is_end<$is_end> length<$length>\n";
                }
 
                if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
@@ -1792,12 +1818,24 @@ sub process {
 # Check for potential 'bare' types
                my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
                    $realline_next);
-               if ($realcnt && $line =~ /.\s*\S/) {
+#print "LINE<$line>\n";
+               if ($linenr >= $suppress_statement &&
+                   $realcnt && $line =~ /.\s*\S/) {
                        ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
                                ctx_statement_block($linenr, $realcnt, 0);
                        $stat =~ s/\n./\n /g;
                        $cond =~ s/\n./\n /g;
 
+#print "linenr<$linenr> <$stat>\n";
+                       # If this statement has no statement boundaries within
+                       # it there is no point in retrying a statement scan
+                       # until we hit end of it.
+                       my $frag = $stat; $frag =~ s/;+\s*$//;
+                       if ($frag !~ /(?:{|;)/) {
+#print "skip<$line_nr_next>\n";
+                               $suppress_statement = $line_nr_next;
+                       }
+
                        # Find the real next line.
                        $realline_next = $line_nr_next;
                        if (defined $realline_next &&
@@ -1923,6 +1961,9 @@ sub process {
 
 # Check relative indent for conditionals and blocks.
                if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+                       ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+                               ctx_statement_block($linenr, $realcnt, 0)
+                                       if (!defined $stat);
                        my ($s, $c) = ($stat, $cond);
 
                        substr($s, 0, length($c), '');
@@ -2090,7 +2131,7 @@ sub process {
                        #   XXX(foo);
                        #   EXPORT_SYMBOL(something_foo);
                        my $name = $1;
-                       if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
+                       if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
                            $name =~ /^${Ident}_$2/) {
 #print "FOO C name<$name>\n";
                                $suppress_export{$realline_next} = 1;
@@ -2168,8 +2209,9 @@ sub process {
 
 # * goes on variable not on type
                # (char*[ const])
-               if ($line =~ m{\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\)}) {
-                       my ($from, $to) = ($1, $1);
+               while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) {
+                       #print "AA<$1>\n";
+                       my ($from, $to) = ($2, $2);
 
                        # Should start with a space.
                        $to =~ s/^(\S)/ $1/;
@@ -2184,8 +2226,10 @@ sub process {
                                ERROR("POINTER_LOCATION",
                                      "\"(foo$from)\" should be \"(foo$to)\"\n" .  $herecurr);
                        }
-               } elsif ($line =~ m{\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident)}) {
-                       my ($from, $to, $ident) = ($1, $1, $2);
+               }
+               while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) {
+                       #print "BB<$1>\n";
+                       my ($from, $to, $ident) = ($2, $2, $3);
 
                        # Should start with a space.
                        $to =~ s/^(\S)/ $1/;
@@ -2568,7 +2612,7 @@ sub process {
                        # Flatten any parentheses
                        $value =~ s/\(/ \(/g;
                        $value =~ s/\)/\) /g;
-                       while ($value =~ s/\[[^\{\}]*\]/1/ ||
+                       while ($value =~ s/\[[^\[\]]*\]/1/ ||
                               $value !~ /(?:$Ident|-?$Constant)\s*
                                             $Compare\s*
                                             (?:$Ident|-?$Constant)/x &&
@@ -2593,28 +2637,6 @@ sub process {
                        }
                }
 
-# typecasts on min/max could be min_t/max_t
-               if ($line =~ /^\+(?:.*?)\b(min|max)\s*\($Typecast{0,1}($LvalOrFunc)\s*,\s*$Typecast{0,1}($LvalOrFunc)\s*\)/) {
-                       if (defined $2 || defined $8) {
-                               my $call = $1;
-                               my $cast1 = deparenthesize($2);
-                               my $arg1 = $3;
-                               my $cast2 = deparenthesize($8);
-                               my $arg2 = $9;
-                               my $cast;
-
-                               if ($cast1 ne "" && $cast2 ne "") {
-                                       $cast = "$cast1 or $cast2";
-                               } elsif ($cast1 ne "") {
-                                       $cast = $cast1;
-                               } else {
-                                       $cast = $cast2;
-                               }
-                               WARN("MINMAX",
-                                    "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr);
-                       }
-               }
-
 # Need a space before open parenthesis after if, while etc
                if ($line=~/\b(if|while|for|switch)\(/) {
                        ERROR("SPACING", "space required before the open parenthesis '('\n" . $herecurr);
@@ -2623,6 +2645,9 @@ sub process {
 # Check for illegal assignment in if conditional -- and check for trailing
 # statements after the conditional.
                if ($line =~ /do\s*(?!{)/) {
+                       ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+                               ctx_statement_block($linenr, $realcnt, 0)
+                                       if (!defined $stat);
                        my ($stat_next) = ctx_statement_block($line_nr_next,
                                                $remain_next, $off_next);
                        $stat_next =~ s/\n./\n /g;
@@ -2778,47 +2803,13 @@ sub process {
                        my $cnt = $realcnt;
                        my ($off, $dstat, $dcond, $rest);
                        my $ctx = '';
-
-                       my $args = defined($1);
-
-                       # Find the end of the macro and limit our statement
-                       # search to that.
-                       while ($cnt > 0 && defined $lines[$ln - 1] &&
-                               $lines[$ln - 1] =~ /^(?:-|..*\\$)/)
-                       {
-                               $ctx .= $rawlines[$ln - 1] . "\n";
-                               $cnt-- if ($lines[$ln - 1] !~ /^-/);
-                               $ln++;
-                       }
-                       $ctx .= $rawlines[$ln - 1];
-
                        ($dstat, $dcond, $ln, $cnt, $off) =
-                               ctx_statement_block($linenr, $ln - $linenr + 1, 0);
+                               ctx_statement_block($linenr, $realcnt, 0);
+                       $ctx = $dstat;
                        #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
                        #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
 
-                       # Extract the remainder of the define (if any) and
-                       # rip off surrounding spaces, and trailing \'s.
-                       $rest = '';
-                       while ($off != 0 || ($cnt > 0 && $rest =~ /\\\s*$/)) {
-                               #print "ADDING cnt<$cnt> $off <" . substr($lines[$ln - 1], $off) . "> rest<$rest>\n";
-                               if ($off != 0 || $lines[$ln - 1] !~ /^-/) {
-                                       $rest .= substr($lines[$ln - 1], $off) . "\n";
-                                       $cnt--;
-                               }
-                               $ln++;
-                               $off = 0;
-                       }
-                       $rest =~ s/\\\n.//g;
-                       $rest =~ s/^\s*//s;
-                       $rest =~ s/\s*$//s;
-
-                       # Clean up the original statement.
-                       if ($args) {
-                               substr($dstat, 0, length($dcond), '');
-                       } else {
-                               $dstat =~ s/^.\s*\#\s*define\s+$Ident\s*//;
-                       }
+                       $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
                        $dstat =~ s/$;//g;
                        $dstat =~ s/\\\n.//g;
                        $dstat =~ s/^\s*//s;
@@ -2827,7 +2818,7 @@ sub process {
                        # Flatten any parentheses and braces
                        while ($dstat =~ s/\([^\(\)]*\)/1/ ||
                               $dstat =~ s/\{[^\{\}]*\}/1/ ||
-                              $dstat =~ s/\[[^\{\}]*\]/1/)
+                              $dstat =~ s/\[[^\[\]]*\]/1/)
                        {
                        }
 
@@ -2844,23 +2835,32 @@ sub process {
                                ^\"|\"$
                        }x;
                        #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
-                       if ($rest ne '' && $rest ne ',') {
-                               if ($rest !~ /while\s*\(/ &&
-                                   $dstat !~ /$exceptions/)
-                               {
-                                       ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
-                                             "Macros with multiple statements should be enclosed in a do - while loop\n" . "$here\n$ctx\n");
+                       if ($dstat ne '' &&
+                           $dstat !~ /^(?:$Ident|-?$Constant),$/ &&                    # 10, // foo(),
+                           $dstat !~ /^(?:$Ident|-?$Constant);$/ &&                    # foo();
+                           $dstat !~ /^(?:$Ident|-?$Constant)$/ &&                     # 10 // foo()
+                           $dstat !~ /$exceptions/ &&
+                           $dstat !~ /^\.$Ident\s*=/ &&                                # .foo =
+                           $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&       # do {...} while (...); // do {...} while (...)
+                           $dstat !~ /^for\s*$Constant$/ &&                            # for (...)
+                           $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&   # for (...) bar()
+                           $dstat !~ /^do\s*{/ &&                                      # do {...
+                           $dstat !~ /^\({/)                                           # ({...
+                       {
+                               $ctx =~ s/\n*$//;
+                               my $herectx = $here . "\n";
+                               my $cnt = statement_rawlines($ctx);
+
+                               for (my $n = 0; $n < $cnt; $n++) {
+                                       $herectx .= raw_line($linenr, $n) . "\n";
                                }
 
-                       } elsif ($ctx !~ /;/) {
-                               if ($dstat ne '' &&
-                                   $dstat !~ /^(?:$Ident|-?$Constant)$/ &&
-                                   $dstat !~ /$exceptions/ &&
-                                   $dstat !~ /^\.$Ident\s*=/ &&
-                                   $dstat =~ /$Operators/)
-                               {
+                               if ($dstat =~ /;/) {
+                                       ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
+                                             "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
+                               } else {
                                        ERROR("COMPLEX_MACRO",
-                                             "Macros with complex values should be enclosed in parenthesis\n" . "$here\n$ctx\n");
+                                             "Macros with complex values should be enclosed in parenthesis\n" . "$herectx");
                                }
                        }
                }
@@ -3111,6 +3111,12 @@ sub process {
                             "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
                }
 
+# Check for __attribute__ format(printf, prefer __printf
+               if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
+                       WARN("PREFER_PRINTF",
+                            "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr);
+               }
+
 # check for sizeof(&)
                if ($line =~ /\bsizeof\s*\(\s*\&/) {
                        WARN("SIZEOF_ADDRESS",
@@ -3123,6 +3129,46 @@ sub process {
                             "Avoid line continuations in quoted strings\n" . $herecurr);
                }
 
+# Check for misused memsets
+               if (defined $stat &&
+                   $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*$FuncArg\s*\)/s) {
+
+                       my $ms_addr = $2;
+                       my $ms_val = $8;
+                       my $ms_size = $14;
+
+                       if ($ms_size =~ /^(0x|)0$/i) {
+                               ERROR("MEMSET",
+                                     "memset to 0's uses 0 as the 2nd argument, not the 3rd\n" . "$here\n$stat\n");
+                       } elsif ($ms_size =~ /^(0x|)1$/i) {
+                               WARN("MEMSET",
+                                    "single byte memset is suspicious. Swapped 2nd/3rd argument?\n" . "$here\n$stat\n");
+                       }
+               }
+
+# typecasts on min/max could be min_t/max_t
+               if (defined $stat &&
+                   $stat =~ /^\+(?:.*?)\b(min|max)\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\)/) {
+                       if (defined $2 || defined $8) {
+                               my $call = $1;
+                               my $cast1 = deparenthesize($2);
+                               my $arg1 = $3;
+                               my $cast2 = deparenthesize($8);
+                               my $arg2 = $9;
+                               my $cast;
+
+                               if ($cast1 ne "" && $cast2 ne "") {
+                                       $cast = "$cast1 or $cast2";
+                               } elsif ($cast1 ne "") {
+                                       $cast = $cast1;
+                               } else {
+                                       $cast = $cast2;
+                               }
+                               WARN("MINMAX",
+                                    "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . "$here\n$stat\n");
+                       }
+               }
+
 # check for new externs in .c files.
                if ($realfile =~ /\.c$/ && defined $stat &&
                    $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
@@ -3294,12 +3340,6 @@ sub process {
                        WARN("EXPORTED_WORLD_WRITABLE",
                             "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
                }
-
-               # Check for memset with swapped arguments
-               if ($line =~ /memset.*\,(\ |)(0x|)0(\ |0|)\);/) {
-                       ERROR("MEMSET",
-                             "memset size is 3rd argument, not the second.\n" . $herecurr);
-               }
        }
 
        # If we have no input at all, then there is nothing to report on
index 4594f334105110fefba64883186a2cd546641a82..f32a04c4c5bc1c56a88e0f7fba3cb521aa997e52 100755 (executable)
@@ -95,7 +95,7 @@ my %VCS_cmds_git = (
     "execute_cmd" => \&git_execute_cmd,
     "available" => '(which("git") ne "") && (-d ".git")',
     "find_signers_cmd" =>
-       "git log --no-color --since=\$email_git_since " .
+       "git log --no-color --follow --since=\$email_git_since " .
            '--format="GitCommit: %H%n' .
                      'GitAuthor: %an <%ae>%n' .
                      'GitDate: %aD%n' .
index bbcf921166f7470fad24577d2aa52d1cf85258f4..f6b6551940ab29b80ec2a806f720801e6a72b10c 100644 (file)
@@ -833,7 +833,7 @@ static int ldo_regulator_register(struct snd_soc_codec *codec,
        ldo->voltage = voltage;
 
        ldo->dev = regulator_register(&ldo->desc, codec->dev,
-                                         init_data, ldo);
+                                         init_data, ldo, NULL);
        if (IS_ERR(ldo->dev)) {
                int ret = PTR_ERR(ldo->dev);
 
index 8eb6c489fb152c006d1cd781800cfe7c73059cd8..77f952762426661615d3d0180ab2728010e6069f 100644 (file)
@@ -17,8 +17,8 @@ titan:~> perf list
   kmem:kmem_cache_alloc_node               [Tracepoint event]
   kmem:kfree                               [Tracepoint event]
   kmem:kmem_cache_free                     [Tracepoint event]
-  kmem:mm_page_free_direct                 [Tracepoint event]
-  kmem:mm_pagevec_free                     [Tracepoint event]
+  kmem:mm_page_free                        [Tracepoint event]
+  kmem:mm_page_free_batched                [Tracepoint event]
   kmem:mm_page_alloc                       [Tracepoint event]
   kmem:mm_page_alloc_zone_locked           [Tracepoint event]
   kmem:mm_page_pcpu_drain                  [Tracepoint event]
@@ -29,15 +29,15 @@ measured. For example the page alloc/free properties of a 'hackbench
 run' are:
 
  titan:~> perf stat -e kmem:mm_page_pcpu_drain -e kmem:mm_page_alloc
- -e kmem:mm_pagevec_free -e kmem:mm_page_free_direct ./hackbench 10
+ -e kmem:mm_page_free_batched -e kmem:mm_page_free ./hackbench 10
  Time: 0.575
 
  Performance counter stats for './hackbench 10':
 
           13857  kmem:mm_page_pcpu_drain
           27576  kmem:mm_page_alloc
-           6025  kmem:mm_pagevec_free
-          20934  kmem:mm_page_free_direct
+           6025  kmem:mm_page_free_batched
+          20934  kmem:mm_page_free
 
     0.613972165  seconds time elapsed
 
@@ -45,8 +45,8 @@ You can observe the statistical properties as well, by using the
 'repeat the workload N times' feature of perf stat:
 
  titan:~> perf stat --repeat 5 -e kmem:mm_page_pcpu_drain -e
-   kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
-   kmem:mm_page_free_direct ./hackbench 10
+   kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+   kmem:mm_page_free ./hackbench 10
  Time: 0.627
  Time: 0.644
  Time: 0.564
@@ -57,8 +57,8 @@ You can observe the statistical properties as well, by using the
 
           12920  kmem:mm_page_pcpu_drain    ( +-   3.359% )
           25035  kmem:mm_page_alloc         ( +-   3.783% )
-           6104  kmem:mm_pagevec_free       ( +-   0.934% )
-          18376  kmem:mm_page_free_direct   ( +-   4.941% )
+           6104  kmem:mm_page_free_batched  ( +-   0.934% )
+          18376  kmem:mm_page_free         ( +-   4.941% )
 
     0.643954516  seconds time elapsed   ( +-   2.363% )
 
@@ -158,15 +158,15 @@ Or you can observe the whole system's page allocations for 10
 seconds:
 
 titan:~/git> perf stat -a -e kmem:mm_page_pcpu_drain -e
-kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
-kmem:mm_page_free_direct sleep 10
+kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+kmem:mm_page_free sleep 10
 
  Performance counter stats for 'sleep 10':
 
          171585  kmem:mm_page_pcpu_drain
          322114  kmem:mm_page_alloc
-          73623  kmem:mm_pagevec_free
-         254115  kmem:mm_page_free_direct
+          73623  kmem:mm_page_free_batched
+         254115  kmem:mm_page_free
 
    10.000591410  seconds time elapsed
 
@@ -174,15 +174,15 @@ Or observe how fluctuating the page allocations are, via statistical
 analysis done over ten 1-second intervals:
 
  titan:~/git> perf stat --repeat 10 -a -e kmem:mm_page_pcpu_drain -e
-   kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
-   kmem:mm_page_free_direct sleep 1
+   kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+   kmem:mm_page_free sleep 1
 
  Performance counter stats for 'sleep 1' (10 runs):
 
           17254  kmem:mm_page_pcpu_drain    ( +-   3.709% )
           34394  kmem:mm_page_alloc         ( +-   4.617% )
-           7509  kmem:mm_pagevec_free       ( +-   4.820% )
-          25653  kmem:mm_page_free_direct   ( +-   3.672% )
+           7509  kmem:mm_page_free_batched  ( +-   4.820% )
+          25653  kmem:mm_page_free         ( +-   3.672% )
 
     1.058135029  seconds time elapsed   ( +-   3.089% )
 
index a6ec206f36ba2f16f43c90680535ff6bcc45d56f..88b2fe3ddf42a3c60bba0a3fbc1d7bd3172a8730 100644 (file)
@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
         * (addr,len) is fully included in
         * (zone->addr, zone->size)
         */
-
-       return (dev->zone.addr <= addr &&
-               addr + len <= dev->zone.addr + dev->zone.size);
+       if (len < 0)
+               return 0;
+       if (addr + len < addr)
+               return 0;
+       if (addr < dev->zone.addr)
+               return 0;
+       if (addr + len > dev->zone.addr + dev->zone.size)
+               return 0;
+       return 1;
 }
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
index 3eed61eb48675a63dd1f31b0095217ab6bc5f646..dcaf272c26c0e232d01b265e652488642e7cc496 100644 (file)
@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                irqe.dest_mode = 0; /* Physical mode. */
                /* need to read apic_id from apic regiest since
                 * it can be rewritten */
-               irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
+               irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
        }
 #endif
        return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
                     (void*)addr, len, val);
        ASSERT(!(addr & 0xf));  /* check alignment */
 
-       if (len == 4 || len == 8)
+       switch (len) {
+       case 8:
+       case 4:
                data = *(u32 *) val;
-       else {
+               break;
+       case 2:
+               data = *(u16 *) val;
+               break;
+       case 1:
+               data = *(u8  *) val;
+               break;
+       default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }
@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
-               ioapic->ioregsel = data;
+               ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;
 
        case IOAPIC_REG_WINDOW:
index a195c07fa8290b44cb435f5ac06d841507e33503..0fb448e6a1a33288694e78ae764540df7e7a80bb 100644 (file)
@@ -113,7 +113,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 
                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
-                             get_order(page_size), flags);
+                             page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%llx\n", pfn);
@@ -134,14 +134,15 @@ unmap_pages:
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-       int i, idx, r = 0;
+       int idx, r = 0;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+       kvm_for_each_memslot(memslot, slots) {
+               r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
@@ -292,15 +293,15 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
        while (gfn < end_gfn) {
                unsigned long unmap_pages;
-               int order;
+               size_t size;
 
                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn  = phys >> PAGE_SHIFT;
 
                /* Unmap address from IO address space */
-               order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
-               unmap_pages = 1ULL << order;
+               size       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+               unmap_pages = 1ULL << get_order(size);
 
                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);
@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-       int i, idx;
+       int idx;
        struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
 
        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
-                                   slots->memslots[i].npages);
-       }
+       kvm_for_each_memslot(memslot, slots)
+               kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
        srcu_read_unlock(&kvm->srcu, idx);
 
        return 0;
index d9cfb782cb81a87fc5ef4f1734df1a308a55539b..7287bf5d1c9edc1fa84681aea4f989c9c750fa9c 100644 (file)
@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
+static void kvm_init_memslots_id(struct kvm *kvm)
+{
+       int i;
+       struct kvm_memslots *slots = kvm->memslots;
+
+       for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+               slots->id_to_index[i] = slots->memslots[i].id = i;
+}
+
 static struct kvm *kvm_create_vm(void)
 {
        int r, i;
@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err_nosrcu;
+       kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_nosrcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -547,11 +557,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 
 void kvm_free_physmem(struct kvm *kvm)
 {
-       int i;
        struct kvm_memslots *slots = kvm->memslots;
+       struct kvm_memory_slot *memslot;
 
-       for (i = 0; i < slots->nmemslots; ++i)
-               kvm_free_physmem_slot(&slots->memslots[i], NULL);
+       kvm_for_each_memslot(memslot, slots)
+               kvm_free_physmem_slot(memslot, NULL);
 
        kfree(kvm->memslots);
 }
@@ -625,10 +635,69 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
                return -ENOMEM;
 
        memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+       memslot->nr_dirty_pages = 0;
        return 0;
 }
 #endif /* !CONFIG_S390 */
 
+static struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+       struct kvm_memory_slot *memslot;
+
+       kvm_for_each_memslot(memslot, slots)
+               if (gfn >= memslot->base_gfn &&
+                     gfn < memslot->base_gfn + memslot->npages)
+                       return memslot;
+
+       return NULL;
+}
+
+static int cmp_memslot(const void *slot1, const void *slot2)
+{
+       struct kvm_memory_slot *s1, *s2;
+
+       s1 = (struct kvm_memory_slot *)slot1;
+       s2 = (struct kvm_memory_slot *)slot2;
+
+       if (s1->npages < s2->npages)
+               return 1;
+       if (s1->npages > s2->npages)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Sort the memslots base on its size, so the larger slots
+ * will get better fit.
+ */
+static void sort_memslots(struct kvm_memslots *slots)
+{
+       int i;
+
+       sort(slots->memslots, KVM_MEM_SLOTS_NUM,
+             sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+
+       for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+               slots->id_to_index[slots->memslots[i].id] = i;
+}
+
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+{
+       if (new) {
+               int id = new->id;
+               struct kvm_memory_slot *old = id_to_memslot(slots, id);
+               unsigned long npages = old->npages;
+
+               *old = *new;
+               if (new->npages != npages)
+                       sort_memslots(slots);
+       }
+
+       slots->generation++;
+}
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -662,12 +731,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        (void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size)))
                goto out;
-       if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+       if (mem->slot >= KVM_MEM_SLOTS_NUM)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;
 
-       memslot = &kvm->memslots->memslots[mem->slot];
+       memslot = id_to_memslot(kvm->memslots, mem->slot);
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -774,15 +843,17 @@ skip_lpage:
 #endif /* not defined CONFIG_S390 */
 
        if (!npages) {
+               struct kvm_memory_slot *slot;
+
                r = -ENOMEM;
-               slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+               slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+                               GFP_KERNEL);
                if (!slots)
                        goto out_free;
-               memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-               if (mem->slot >= slots->nmemslots)
-                       slots->nmemslots = mem->slot + 1;
-               slots->generation++;
-               slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+               slot = id_to_memslot(slots, mem->slot);
+               slot->flags |= KVM_MEMSLOT_INVALID;
+
+               update_memslots(slots, NULL);
 
                old_memslots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
@@ -810,13 +881,10 @@ skip_lpage:
        }
 
        r = -ENOMEM;
-       slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+                       GFP_KERNEL);
        if (!slots)
                goto out_free;
-       memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-       if (mem->slot >= slots->nmemslots)
-               slots->nmemslots = mem->slot + 1;
-       slots->generation++;
 
        /* actual memory is freed via old in kvm_free_physmem_slot below */
        if (!npages) {
@@ -826,7 +894,7 @@ skip_lpage:
                        new.lpage_info[i] = NULL;
        }
 
-       slots->memslots[mem->slot] = new;
+       update_memslots(slots, &new);
        old_memslots = kvm->memslots;
        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);
@@ -888,7 +956,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;
 
-       memslot = &kvm->memslots->memslots[log->slot];
+       memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;
@@ -966,16 +1034,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
                                                gfn_t gfn)
 {
-       int i;
-
-       for (i = 0; i < slots->nmemslots; ++i) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
-
-               if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages)
-                       return memslot;
-       }
-       return NULL;
+       return search_memslots(slots, gfn);
 }
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
@@ -986,20 +1045,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
-       int i;
-       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 
-       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-               struct kvm_memory_slot *memslot = &slots->memslots[i];
-
-               if (memslot->flags & KVM_MEMSLOT_INVALID)
-                       continue;
+       if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+             memslot->flags & KVM_MEMSLOT_INVALID)
+               return 0;
 
-               if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages)
-                       return 1;
-       }
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
@@ -1491,7 +1543,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-               __set_bit_le(rel_gfn, memslot->dirty_bitmap);
+               if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
+                       memslot->nr_dirty_pages++;
        }
 }
 
@@ -1690,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);
 
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       if (kvm->bsp_vcpu_id == id)
-               kvm->bsp_vcpu = vcpu;
-#endif
        mutex_unlock(&kvm->lock);
        return r;
 
@@ -1768,12 +1817,11 @@ out_free1:
                struct kvm_regs *kvm_regs;
 
                r = -ENOMEM;
-               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
-               if (!kvm_regs)
+               kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
+               if (IS_ERR(kvm_regs)) {
+                       r = PTR_ERR(kvm_regs);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
-                       goto out_free2;
+               }
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
@@ -1797,13 +1845,11 @@ out_free2:
                break;
        }
        case KVM_SET_SREGS: {
-               kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!kvm_sregs)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
+               kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
+               if (IS_ERR(kvm_sregs)) {
+                       r = PTR_ERR(kvm_sregs);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
@@ -1899,13 +1945,11 @@ out_free2:
                break;
        }
        case KVM_SET_FPU: {
-               fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!fpu)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
+               fpu = memdup_user(argp, sizeof(*fpu));
+               if (IS_ERR(fpu)) {
+                       r = PTR_ERR(fpu);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
                if (r)
                        goto out;
@@ -2520,10 +2564,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
        if (bus->dev_count > NR_IOBUS_DEVS-1)
                return -ENOSPC;
 
-       new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+       new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
-       memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
        kvm_io_bus_insert_dev(new_bus, dev, addr, len);
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
@@ -2539,13 +2582,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        int i, r;
        struct kvm_io_bus *new_bus, *bus;
 
-       new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+       bus = kvm->buses[bus_idx];
+
+       new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
 
-       bus = kvm->buses[bus_idx];
-       memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
-
        r = -ENOENT;
        for (i = 0; i < new_bus->dev_count; i++)
                if (new_bus->range[i].dev == dev) {
@@ -2612,15 +2654,29 @@ static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VM]   = &vm_stat_fops,
 };
 
-static void kvm_init_debug(void)
+static int kvm_init_debug(void)
 {
+       int r = -EFAULT;
        struct kvm_stats_debugfs_item *p;
 
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
-       for (p = debugfs_entries; p->name; ++p)
+       if (kvm_debugfs_dir == NULL)
+               goto out;
+
+       for (p = debugfs_entries; p->name; ++p) {
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
+               if (p->dentry == NULL)
+                       goto out_dir;
+       }
+
+       return 0;
+
+out_dir:
+       debugfs_remove_recursive(kvm_debugfs_dir);
+out:
+       return r;
 }
 
 static void kvm_exit_debug(void)
@@ -2764,10 +2820,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
-       kvm_init_debug();
+       r = kvm_init_debug();
+       if (r) {
+               printk(KERN_ERR "kvm: create debugfs files failed\n");
+               goto out_undebugfs;
+       }
 
        return 0;
 
+out_undebugfs:
+       unregister_syscore_ops(&kvm_syscore_ops);
 out_unreg:
        kvm_async_pf_deinit();
 out_free: